From 70d9b72a87f17736784fc91764fe3ffed9dee8a6 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Sat, 25 Apr 2020 18:46:29 +0200 Subject: [PATCH] cmd/compile: convert more arm64 lowering rules to typed aux Passes GOARCH=arm64 gotip build -toolexec 'toolstash -cmp' -a std Change-Id: Idc0ac2638c7a9b840ba2d6f4bba2e9c5df24c807 Reviewed-on: https://go-review.googlesource.com/c/go/+/230177 Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/ARM64.rules | 200 ++++++------- src/cmd/compile/internal/ssa/rewriteARM64.go | 294 +++++++++---------- 2 files changed, 247 insertions(+), 247 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 9fec8c55266..a51666a6c8d 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -315,95 +315,95 @@ (LocalAddr {sym} base _) => (MOVDaddr {sym} base) // loads -(Load ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem) -(Load ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem) -(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem) -(Load ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem) -(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem) -(Load ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem) -(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem) -(Load ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem) -(Load ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem) -(Load ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem) +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem) // stores -(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem) // zeroing -(Zero [0] _ mem) -> mem -(Zero [1] ptr mem) -> (MOVBstore ptr (MOVDconst [0]) mem) -(Zero [2] ptr mem) -> 
(MOVHstore ptr (MOVDconst [0]) mem) -(Zero [4] ptr mem) -> (MOVWstore ptr (MOVDconst [0]) mem) -(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst [0]) mem) +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem) +(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem) +(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem) +(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem) -(Zero [3] ptr mem) -> +(Zero [3] ptr mem) => (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) -(Zero [5] ptr mem) -> +(Zero [5] ptr mem) => (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) -(Zero [6] ptr mem) -> +(Zero [6] ptr mem) => (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) -(Zero [7] ptr mem) -> +(Zero [7] ptr mem) => (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) -(Zero [9] ptr mem) -> +(Zero [9] ptr mem) => (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) -(Zero [10] ptr mem) -> +(Zero [10] ptr mem) => (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) -(Zero [11] ptr mem) -> +(Zero [11] ptr mem) => (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) -(Zero [12] ptr mem) -> +(Zero [12] ptr mem) => (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) -(Zero [13] ptr mem) -> +(Zero [13] ptr mem) => (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) -(Zero [14] ptr mem) -> +(Zero [14] ptr mem) => (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) -(Zero [15] ptr mem) -> +(Zero [15] ptr mem) => (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))) -(Zero [16] ptr mem) -> +(Zero [16] ptr mem) => (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) -(Zero [32] ptr mem) -> +(Zero [32] ptr mem) => (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) -(Zero [48] ptr mem) -> +(Zero [48] ptr mem) => (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) -(Zero [64] ptr mem) -> +(Zero [64] ptr mem) => (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) // strip off fractional word zeroing -(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 -> +(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 => (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem)) -(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 -> +(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 => (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem)) @@ -412,50 +412,50 @@ // 4, 16, and 64 are magic constants, see runtime/mkduff.go (Zero [s] ptr mem) && s%16 == 0 && s > 64 && s <= 16*64 - && !config.noDuffDevice -> + && !config.noDuffDevice => (DUFFZERO [4 * (64 - s/16)] ptr mem) // large zeroing uses a loop (Zero [s] ptr mem) - && s%16 == 0 && (s > 16*64 || config.noDuffDevice) -> + && s%16 == 0 && (s > 16*64 || config.noDuffDevice) => (LoweredZero ptr (ADDconst [s-16] ptr) mem) // moves -(Move [0] _ _ mem) -> mem -(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem) -(Move [2] 
dst src mem) -> (MOVHstore dst (MOVHUload src mem) mem) -(Move [4] dst src mem) -> (MOVWstore dst (MOVWUload src mem) mem) -(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem) +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem) +(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem) +(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem) +(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem) -(Move [3] dst src mem) -> +(Move [3] dst src mem) => (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) -(Move [5] dst src mem) -> +(Move [5] dst src mem) => (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) -(Move [6] dst src mem) -> +(Move [6] dst src mem) => (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) -(Move [7] dst src mem) -> +(Move [7] dst src mem) => (MOVBstore [6] dst (MOVBUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))) -(Move [12] dst src mem) -> +(Move [12] dst src mem) => (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) -(Move [16] dst src mem) -> +(Move [16] dst src mem) => (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) -(Move [24] dst src mem) -> +(Move [24] dst src mem) => (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))) // strip off fractional word move -(Move [s] dst src mem) && s%8 != 0 && s > 8 -> +(Move [s] dst src mem) && s%8 != 0 && s > 8 => (Move [s%8] (OffPtr dst [s-s%8]) (OffPtr src [s-s%8]) @@ -464,12 +464,12 @@ // medium move uses a duff device (Move [s] dst src mem) && s > 32 && s <= 16*64 && s%16 == 8 - && !config.noDuffDevice && logLargeCopy(v, s) -> - (MOVDstore [s-8] dst (MOVDload [s-8] src mem) + && !config.noDuffDevice && logLargeCopy(v, s) => + (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem) (DUFFCOPY [8*(64-(s-8)/16)] dst src mem)) (Move [s] dst src mem) && s > 32 && s <= 16*64 && s%16 == 0 - && !config.noDuffDevice && logLargeCopy(v, s) -> + && !config.noDuffDevice && logLargeCopy(v, s) => (DUFFCOPY [8 * (64 - s/16)] dst src mem) // 8 is the number of bytes to encode: // @@ -480,7 +480,7 @@ // large move uses a loop (Move [s] dst src mem) - && s > 24 && s%8 == 0 && logLargeCopy(v, s) -> + && s > 24 && s%8 == 0 && logLargeCopy(v, s) => (LoweredMove dst src @@ -488,67 +488,67 @@ mem) // calls -(StaticCall ...) -> (CALLstatic ...) -(ClosureCall ...) -> (CALLclosure ...) -(InterCall ...) -> (CALLinter ...) +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) // checks -(NilCheck ...) -> (LoweredNilCheck ...) -(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr)) -(IsInBounds idx len) -> (LessThanU (CMP idx len)) -(IsSliceInBounds idx len) -> (LessEqualU (CMP idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr)) +(IsInBounds idx len) => (LessThanU (CMP idx len)) +(IsSliceInBounds idx len) => (LessEqualU (CMP idx len)) // pseudo-ops -(GetClosurePtr ...) -> (LoweredGetClosurePtr ...) -(GetCallerSP ...) -> (LoweredGetCallerSP ...) -(GetCallerPC ...) -> (LoweredGetCallerPC ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) // Absorb pseudo-ops into blocks. 
-(If (Equal cc) yes no) -> (EQ cc yes no) -(If (NotEqual cc) yes no) -> (NE cc yes no) -(If (LessThan cc) yes no) -> (LT cc yes no) -(If (LessThanU cc) yes no) -> (ULT cc yes no) -(If (LessEqual cc) yes no) -> (LE cc yes no) -(If (LessEqualU cc) yes no) -> (ULE cc yes no) -(If (GreaterThan cc) yes no) -> (GT cc yes no) -(If (GreaterThanU cc) yes no) -> (UGT cc yes no) -(If (GreaterEqual cc) yes no) -> (GE cc yes no) -(If (GreaterEqualU cc) yes no) -> (UGE cc yes no) -(If (LessThanF cc) yes no) -> (FLT cc yes no) -(If (LessEqualF cc) yes no) -> (FLE cc yes no) -(If (GreaterThanF cc) yes no) -> (FGT cc yes no) -(If (GreaterEqualF cc) yes no) -> (FGE cc yes no) +(If (Equal cc) yes no) => (EQ cc yes no) +(If (NotEqual cc) yes no) => (NE cc yes no) +(If (LessThan cc) yes no) => (LT cc yes no) +(If (LessThanU cc) yes no) => (ULT cc yes no) +(If (LessEqual cc) yes no) => (LE cc yes no) +(If (LessEqualU cc) yes no) => (ULE cc yes no) +(If (GreaterThan cc) yes no) => (GT cc yes no) +(If (GreaterThanU cc) yes no) => (UGT cc yes no) +(If (GreaterEqual cc) yes no) => (GE cc yes no) +(If (GreaterEqualU cc) yes no) => (UGE cc yes no) +(If (LessThanF cc) yes no) => (FLT cc yes no) +(If (LessEqualF cc) yes no) => (FLE cc yes no) +(If (GreaterThanF cc) yes no) => (FGT cc yes no) +(If (GreaterEqualF cc) yes no) => (FGE cc yes no) -(If cond yes no) -> (NZ cond yes no) +(If cond yes no) => (NZ cond yes no) // atomic intrinsics // Note: these ops do not accept offset. -(AtomicLoad8 ...) -> (LDARB ...) -(AtomicLoad32 ...) -> (LDARW ...) -(AtomicLoad64 ...) -> (LDAR ...) -(AtomicLoadPtr ...) -> (LDAR ...) +(AtomicLoad8 ...) => (LDARB ...) +(AtomicLoad32 ...) => (LDARW ...) +(AtomicLoad64 ...) => (LDAR ...) +(AtomicLoadPtr ...) => (LDAR ...) -(AtomicStore8 ...) -> (STLRB ...) -(AtomicStore32 ...) -> (STLRW ...) -(AtomicStore64 ...) -> (STLR ...) -(AtomicStorePtrNoWB ...) -> (STLR ...) +(AtomicStore8 ...) => (STLRB ...) +(AtomicStore32 ...) => (STLRW ...) +(AtomicStore64 ...) => (STLR ...) +(AtomicStorePtrNoWB ...) => (STLR ...) -(AtomicExchange(32|64) ...) -> (LoweredAtomicExchange(32|64) ...) -(AtomicAdd(32|64) ...) -> (LoweredAtomicAdd(32|64) ...) -(AtomicCompareAndSwap(32|64) ...) -> (LoweredAtomicCas(32|64) ...) +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) +(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...) // Currently the updated value is not used, but we need a register to temporarily hold it. -(AtomicAnd8 ptr val mem) -> (Select1 (LoweredAtomicAnd8 ptr val mem)) -(AtomicOr8 ptr val mem) -> (Select1 (LoweredAtomicOr8 ptr val mem)) +(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem)) +(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem)) -(AtomicAdd(32|64)Variant ...) -> (LoweredAtomicAdd(32|64)Variant ...) +(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...) // Write barrier. -(WB ...) -> (LoweredWB ...) +(WB ...) => (LoweredWB ...) 
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) // Optimizations diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 2c18a70581f..f0af125e9ed 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -22473,7 +22473,7 @@ func rewriteValueARM64_OpIsNonNil(v *Value) bool { ptr := v_0 v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v0.AddArg(ptr) v.AddArg(v0) return true @@ -23529,7 +23529,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [0] _ _ mem) // result: mem for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } mem := v_2 @@ -23539,7 +23539,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [1] dst src mem) // result: (MOVBstore dst (MOVBUload src mem) mem) for { - if v.AuxInt != 1 { + if auxIntToInt64(v.AuxInt) != 1 { break } dst := v_0 @@ -23554,7 +23554,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [2] dst src mem) // result: (MOVHstore dst (MOVHUload src mem) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } dst := v_0 @@ -23569,7 +23569,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [4] dst src mem) // result: (MOVWstore dst (MOVWUload src mem) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } dst := v_0 @@ -23584,7 +23584,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [8] dst src mem) // result: (MOVDstore dst (MOVDload src mem) mem) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } dst := v_0 @@ -23599,16 +23599,16 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [3] dst src mem) // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) for { - if v.AuxInt != 3 { + if auxIntToInt64(v.AuxInt) != 3 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVBstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AuxInt = 2 + v0.AuxInt = int32ToAuxInt(2) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) @@ -23620,16 +23620,16 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [5] dst src mem) // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) for { - if v.AuxInt != 5 { + if auxIntToInt64(v.AuxInt) != 5 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVBstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AuxInt = 4 + v0.AuxInt = int32ToAuxInt(4) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) @@ -23641,16 +23641,16 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [6] dst src mem) 
// result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) for { - if v.AuxInt != 6 { + if auxIntToInt64(v.AuxInt) != 6 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVHstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v0.AuxInt = 4 + v0.AuxInt = int32ToAuxInt(4) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) @@ -23662,21 +23662,21 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [7] dst src mem) // result: (MOVBstore [6] dst (MOVBUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))) for { - if v.AuxInt != 7 { + if auxIntToInt64(v.AuxInt) != 7 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVBstore) - v.AuxInt = 6 + v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AuxInt = 6 + v0.AuxInt = int32ToAuxInt(6) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v2.AuxInt = 4 + v2.AuxInt = int32ToAuxInt(4) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) @@ -23689,16 +23689,16 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [12] dst src mem) // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) for { - if v.AuxInt != 12 { + if auxIntToInt64(v.AuxInt) != 12 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVWstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v0.AuxInt = 8 + v0.AuxInt = int32ToAuxInt(8) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) @@ -23710,16 +23710,16 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [16] dst src mem) // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) for { - if v.AuxInt != 16 { + if auxIntToInt64(v.AuxInt) != 16 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVDstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AuxInt = 8 + v0.AuxInt = int32ToAuxInt(8) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) @@ -23731,21 +23731,21 @@ func rewriteValueARM64_OpMove(v *Value) bool { // match: (Move [24] dst src mem) // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))) for { - if v.AuxInt != 24 { + if auxIntToInt64(v.AuxInt) != 24 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpARM64MOVDstore) - v.AuxInt = 16 + v.AuxInt = int32ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AuxInt = 16 + v0.AuxInt = int32ToAuxInt(16) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AuxInt = 8 + v2.AuxInt = int32ToAuxInt(8) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) @@ -23759,7 +23759,7 @@ func 
rewriteValueARM64_OpMove(v *Value) bool { // cond: s%8 != 0 && s > 8 // result: (Move [s%8] (OffPtr dst [s-s%8]) (OffPtr src [s-s%8]) (Move [s-s%8] dst src mem)) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 @@ -23767,24 +23767,24 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } v.reset(OpMove) - v.AuxInt = s % 8 + v.AuxInt = int64ToAuxInt(s % 8) v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = s - s%8 + v0.AuxInt = int64ToAuxInt(s - s%8) v0.AddArg(dst) v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = s - s%8 + v1.AuxInt = int64ToAuxInt(s - s%8) v1.AddArg(src) v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = s - s%8 + v2.AuxInt = int64ToAuxInt(s - s%8) v2.AddArg3(dst, src, mem) v.AddArg3(v0, v1, v2) return true } // match: (Move [s] dst src mem) // cond: s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice && logLargeCopy(v, s) - // result: (MOVDstore [s-8] dst (MOVDload [s-8] src mem) (DUFFCOPY [8*(64-(s-8)/16)] dst src mem)) + // result: (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem) (DUFFCOPY [8*(64-(s-8)/16)] dst src mem)) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 @@ -23792,12 +23792,12 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } v.reset(OpARM64MOVDstore) - v.AuxInt = s - 8 + v.AuxInt = int32ToAuxInt(int32(s - 8)) v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AuxInt = s - 8 + v0.AuxInt = int32ToAuxInt(int32(s - 8)) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem) - v1.AuxInt = 8 * (64 - (s-8)/16) + v1.AuxInt = int64ToAuxInt(8 * (64 - (s-8)/16)) v1.AddArg3(dst, src, mem) v.AddArg3(dst, v0, v1) return true @@ -23806,7 +23806,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // cond: s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s) // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 @@ -23814,7 +23814,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { break } v.reset(OpARM64DUFFCOPY) - v.AuxInt = 8 * (64 - s/16) + v.AuxInt = int64ToAuxInt(8 * (64 - s/16)) v.AddArg3(dst, src, mem) return true } @@ -23822,7 +23822,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { // cond: s > 24 && s%8 == 0 && logLargeCopy(v, s) // result: (LoweredMove dst src (ADDconst src [s-8]) mem) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) dst := v_0 src := v_1 mem := v_2 @@ -23831,7 +23831,7 @@ func rewriteValueARM64_OpMove(v *Value) bool { } v.reset(OpARM64LoweredMove) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type) - v0.AuxInt = s - 8 + v0.AuxInt = int64ToAuxInt(s - 8) v0.AddArg(src) v.AddArg4(dst, src, v0, mem) return true @@ -24010,7 +24010,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -24018,7 +24018,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { break } v.reset(OpARM64LoweredPanicBoundsA) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -24026,7 +24026,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 1 // result: (LoweredPanicBoundsB [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -24034,7 +24034,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { 
break } v.reset(OpARM64LoweredPanicBoundsB) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -24042,7 +24042,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 2 // result: (LoweredPanicBoundsC [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -24050,7 +24050,7 @@ func rewriteValueARM64_OpPanicBounds(v *Value) bool { break } v.reset(OpARM64LoweredPanicBoundsC) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -25178,14 +25178,14 @@ func rewriteValueARM64_OpStore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 1 + // cond: t.Size() == 1 // result: (MOVBstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 1) { + if !(t.Size() == 1) { break } v.reset(OpARM64MOVBstore) @@ -25193,14 +25193,14 @@ func rewriteValueARM64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 2 + // cond: t.Size() == 2 // result: (MOVHstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 2) { + if !(t.Size() == 2) { break } v.reset(OpARM64MOVHstore) @@ -25208,14 +25208,14 @@ func rewriteValueARM64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // cond: t.Size() == 4 && !is32BitFloat(val.Type) // result: (MOVWstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { + if !(t.Size() == 4 && !is32BitFloat(val.Type)) { break } v.reset(OpARM64MOVWstore) @@ -25223,14 +25223,14 @@ func rewriteValueARM64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) + // cond: t.Size() == 8 && !is64BitFloat(val.Type) // result: (MOVDstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { + if !(t.Size() == 8 && !is64BitFloat(val.Type)) { break } v.reset(OpARM64MOVDstore) @@ -25238,14 +25238,14 @@ func rewriteValueARM64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) + // cond: t.Size() == 4 && is32BitFloat(val.Type) // result: (FMOVSstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + if !(t.Size() == 4 && is32BitFloat(val.Type)) { break } v.reset(OpARM64FMOVSstore) @@ -25253,14 +25253,14 @@ func rewriteValueARM64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // cond: t.Size() == 8 && is64BitFloat(val.Type) // result: (FMOVDstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + if !(t.Size() == 8 && is64BitFloat(val.Type)) { break } v.reset(OpARM64FMOVDstore) @@ -25278,7 +25278,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [0] _ mem) // result: mem for { - if v.AuxInt != 0 { + if 
auxIntToInt64(v.AuxInt) != 0 { break } mem := v_1 @@ -25288,71 +25288,71 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [1] ptr mem) // result: (MOVBstore ptr (MOVDconst [0]) mem) for { - if v.AuxInt != 1 { + if auxIntToInt64(v.AuxInt) != 1 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) // result: (MOVHstore ptr (MOVDconst [0]) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] ptr mem) // result: (MOVWstore ptr (MOVDconst [0]) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] ptr mem) // result: (MOVDstore ptr (MOVDconst [0]) mem) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [3] ptr mem) // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) for { - if v.AuxInt != 3 { + if auxIntToInt64(v.AuxInt) != 3 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -25361,15 +25361,15 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [5] ptr mem) // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) for { - if v.AuxInt != 5 { + if auxIntToInt64(v.AuxInt) != 5 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -25378,15 +25378,15 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [6] ptr mem) // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) for { - if v.AuxInt != 6 { + if auxIntToInt64(v.AuxInt) != 6 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -25395,17 +25395,17 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [7] ptr mem) // result: (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) for { - if v.AuxInt != 7 { + if auxIntToInt64(v.AuxInt) != 7 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 6 + v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := 
b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) @@ -25415,15 +25415,15 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [9] ptr mem) // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { - if v.AuxInt != 9 { + if auxIntToInt64(v.AuxInt) != 9 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -25432,15 +25432,15 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [10] ptr mem) // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { - if v.AuxInt != 10 { + if auxIntToInt64(v.AuxInt) != 10 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -25449,17 +25449,17 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [11] ptr mem) // result: (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) for { - if v.AuxInt != 11 { + if auxIntToInt64(v.AuxInt) != 11 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 10 + v.AuxInt = int32ToAuxInt(10) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) @@ -25469,15 +25469,15 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [12] ptr mem) // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { - if v.AuxInt != 12 { + if auxIntToInt64(v.AuxInt) != 12 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) @@ -25486,17 +25486,17 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [13] ptr mem) // result: (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) for { - if v.AuxInt != 13 { + if auxIntToInt64(v.AuxInt) != 13 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 12 + v.AuxInt = int32ToAuxInt(12) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) @@ -25506,17 +25506,17 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [14] ptr mem) // result: (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) for { - if v.AuxInt != 14 { + if 
auxIntToInt64(v.AuxInt) != 14 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) - v.AuxInt = 12 + v.AuxInt = int32ToAuxInt(12) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) @@ -25526,19 +25526,19 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [15] ptr mem) // result: (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))) for { - if v.AuxInt != 15 { + if auxIntToInt64(v.AuxInt) != 15 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) - v.AuxInt = 14 + v.AuxInt = int32ToAuxInt(14) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 12 + v1.AuxInt = int32ToAuxInt(12) v2 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v2.AuxInt = 8 + v2.AuxInt = int32ToAuxInt(8) v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v3.AddArg3(ptr, v0, mem) v2.AddArg3(ptr, v0, v3) @@ -25549,32 +25549,32 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [16] ptr mem) // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) for { - if v.AuxInt != 16 { + if auxIntToInt64(v.AuxInt) != 16 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg4(ptr, v0, v0, mem) return true } // match: (Zero [32] ptr mem) // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) for { - if v.AuxInt != 32 { + if auxIntToInt64(v.AuxInt) != 32 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) - v.AuxInt = 16 + v.AuxInt = int32ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg4(ptr, v0, v0, mem) v.AddArg4(ptr, v0, v0, v1) return true @@ -25582,19 +25582,19 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [48] ptr mem) // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) for { - if v.AuxInt != 48 { + if auxIntToInt64(v.AuxInt) != 48 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) - v.AuxInt = 32 + v.AuxInt = int32ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v1.AuxInt = 16 + v1.AuxInt = int32ToAuxInt(16) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v2.AddArg4(ptr, v0, v0, mem) v1.AddArg4(ptr, v0, v0, v2) v.AddArg4(ptr, v0, v0, v1) @@ -25603,21 +25603,21 @@ func rewriteValueARM64_OpZero(v *Value) bool { // match: (Zero [64] ptr mem) // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) for { - if v.AuxInt != 64 { + if auxIntToInt64(v.AuxInt) != 64 { break } ptr := v_0 mem := v_1 
v.reset(OpARM64STP) - v.AuxInt = 48 + v.AuxInt = int32ToAuxInt(48) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v1.AuxInt = 32 + v1.AuxInt = int32ToAuxInt(32) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v2.AuxInt = 16 + v2.AuxInt = int32ToAuxInt(16) v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v3.AddArg4(ptr, v0, v0, mem) v2.AddArg4(ptr, v0, v0, v3) v1.AddArg4(ptr, v0, v0, v2) @@ -25628,19 +25628,19 @@ func rewriteValueARM64_OpZero(v *Value) bool { // cond: s%16 != 0 && s%16 <= 8 && s > 16 // result: (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem)) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 != 0 && s%16 <= 8 && s > 16) { break } v.reset(OpZero) - v.AuxInt = 8 + v.AuxInt = int64ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) - v0.AuxInt = s - 8 + v0.AuxInt = int64ToAuxInt(s - 8) v0.AddArg(ptr) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) - v1.AuxInt = s - s%16 + v1.AuxInt = int64ToAuxInt(s - s%16) v1.AddArg2(ptr, mem) v.AddArg2(v0, v1) return true @@ -25649,19 +25649,19 @@ func rewriteValueARM64_OpZero(v *Value) bool { // cond: s%16 != 0 && s%16 > 8 && s > 16 // result: (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem)) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 != 0 && s%16 > 8 && s > 16) { break } v.reset(OpZero) - v.AuxInt = 16 + v.AuxInt = int64ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) - v0.AuxInt = s - 16 + v0.AuxInt = int64ToAuxInt(s - 16) v0.AddArg(ptr) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) - v1.AuxInt = s - s%16 + v1.AuxInt = int64ToAuxInt(s - s%16) v1.AddArg2(ptr, mem) v.AddArg2(v0, v1) return true @@ -25670,14 +25670,14 @@ func rewriteValueARM64_OpZero(v *Value) bool { // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice // result: (DUFFZERO [4 * (64 - s/16)] ptr mem) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { break } v.reset(OpARM64DUFFZERO) - v.AuxInt = 4 * (64 - s/16) + v.AuxInt = int64ToAuxInt(4 * (64 - s/16)) v.AddArg2(ptr, mem) return true } @@ -25685,7 +25685,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem) for { - s := v.AuxInt + s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { @@ -25693,7 +25693,7 @@ func rewriteValueARM64_OpZero(v *Value) bool { } v.reset(OpARM64LoweredZero) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) - v0.AuxInt = s - 16 + v0.AuxInt = int64ToAuxInt(s - 16) v0.AddArg(ptr) v.AddArg3(ptr, v0, mem) return true
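
---
Note on what the '->' to '=>' conversion buys, for readers unfamiliar with the typed-aux migration: with untyped rules the generated rewriter reads and writes v.AuxInt (a raw int64) and v.Aux (an interface{}) directly, which is why the old Store rules had to carry t.(*types.Type) assertions in their conditions and why every offset was an untyped int64. Typed rules route each access through per-type conversion helpers, so the rule generator can check the declared aux type of every op. The following is a minimal, self-contained sketch of those helpers, assuming the shape of the real ones in src/cmd/compile/internal/ssa/rewrite.go; the Value and Type types below are simplified stand-ins for illustration, not the compiler's actual definitions.

package main

import "fmt"

// Value is a simplified stand-in for ssa.Value: AuxInt is always stored
// as an int64 and Aux as an untyped interface, regardless of the logical
// aux type an op declares.
type Value struct {
	AuxInt int64
	Aux    interface{}
}

// Type is a stand-in for *types.Type.
type Type struct{ size int64 }

func (t *Type) Size() int64 { return t.size }

// With '=>' rules the generator goes through helpers like these instead
// of touching AuxInt/Aux directly:

func auxIntToInt64(i int64) int64 { return i }
func int64ToAuxInt(i int64) int64 { return i }

// 32-bit aux ints (e.g. load/store offsets such as MOVDstore's) round-trip
// through int32, which is why the rewritten duffcopy rule must spell out
// the narrowing, as in (MOVDstore [int32(s-8)] ...).
func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }

// auxToType replaces the t.(*types.Type) assertions that the untyped
// Store rules carried in their conditions.
func auxToType(a interface{}) *Type { return a.(*Type) }

func main() {
	v := &Value{Aux: &Type{size: 8}}

	// Old style: t := v.Aux; ... t.(*types.Type).Size() == 8
	// New style: the assertion happens once, inside the helper.
	t := auxToType(v.Aux)
	fmt.Println(t.Size() == 8) // true

	// Offset handling in the style of the rewritten
	// (Move [s] ...) => (MOVDstore [int32(s-8)] ...) rule:
	// Move's size aux is an int64, MOVDstore's offset aux is an int32.
	s := auxIntToInt64(v.AuxInt) + 40
	v.AuxInt = int32ToAuxInt(int32(s - 8))
	fmt.Println(auxIntToInt32(v.AuxInt)) // 32
}

This is also why the patch contains one visible rule change beyond punctuation: the medium-move duff-device result now reads (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem) ...), making the int64-to-int32 conversion explicit where the untyped rule silently stored the raw int64.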