From 67d40873adec0b70555840b2ef93e01716fe0730 Mon Sep 17 00:00:00 2001
From: Meng Zhuo
Date: Mon, 20 Apr 2020 18:43:29 +0800
Subject: [PATCH] cmd/compile: adjust MIPS64x rewrite rules to use typed aux fields

Pass toolstash-check

Change-Id: I673c9a24bf69c09573be5aeddbd6072ef35d2d83
Reviewed-on: https://go-review.googlesource.com/c/go/+/228937
TryBot-Result: Gobot Gobot
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/ssa/gen/MIPS64.rules |  772 +++++------
 src/cmd/compile/internal/ssa/rewriteMIPS64.go | 1164 ++++++++---------
 2 files changed, 968 insertions(+), 968 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
index 18864b7ea2..9d319e0df2 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -2,217 +2,217 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-(Add(Ptr|64|32|16|8) ...) -> (ADDV ...)
-(Add(32|64)F ...) -> (ADD(F|D) ...)
+(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)

-(Sub(Ptr|64|32|16|8) ...) -> (SUBV ...)
-(Sub(32|64)F ...) -> (SUB(F|D) ...)
+(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)

-(Mul(64|32|16|8) x y) -> (Select1 (MULVU x y))
-(Mul(32|64)F ...) -> (MUL(F|D) ...)
-(Mul64uhilo ...) -> (MULVU ...)
+(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Mul64uhilo ...) => (MULVU ...)
 (Select0 (Mul64uover x y)) -> (Select1 (MULVU x y))
 (Select1 (Mul64uover x y)) -> (SGTU (Select0 (MULVU x y)) (MOVVconst [0]))

-(Hmul64 x y) -> (Select0 (MULV x y))
-(Hmul64u x y) -> (Select0 (MULVU x y))
-(Hmul32 x y) -> (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
-(Hmul32u x y) -> (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+(Hmul64 x y) => (Select0 (MULV x y))
+(Hmul64u x y) => (Select0 (MULVU x y))
+(Hmul32 x y) => (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) => (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])

-(Div64 x y) -> (Select1 (DIVV x y))
-(Div64u x y) -> (Select1 (DIVVU x y))
-(Div32 x y) -> (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
-(Div32u x y) -> (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Div16 x y) -> (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
-(Div16u x y) -> (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Div8 x y) -> (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
-(Div8u x y) -> (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
-(Div(32|64)F ...) -> (DIV(F|D) ...)
+(Div64 x y) => (Select1 (DIVV x y))
+(Div64u x y) => (Select1 (DIVVU x y))
+(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
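The rules above swap the untyped "->" arrow for the typed "=>" arrow. In the typed form the aux and auxint fields carry real Go types, which is why later hunks can write t.Size() instead of t.(*types.Type).Size(), and why the generated rewriteMIPS64.go further down turns raw assignments such as v0.AuxInt = 0 into v0.AuxInt = int64ToAuxInt(0) and reads such as c := v_1.AuxInt into c := auxIntToInt64(v_1.AuxInt). A minimal sketch of the helper shape this relies on, assuming the trivial definitions used elsewhere in the ssa package (illustrative only, not part of this patch):

    package ssa

    // Typed-aux ("=>") rules route every AuxInt access through small,
    // explicitly typed conversion helpers instead of reading or writing
    // the raw int64 directly, so a rule that uses the wrong aux type
    // fails to compile rather than silently misinterpreting the value.
    func int64ToAuxInt(i int64) int64 { return i }
    func auxIntToInt64(i int64) int64 { return i }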
-(Mod64 x y) -> (Select0 (DIVV x y)) -(Mod64u x y) -> (Select0 (DIVVU x y)) -(Mod32 x y) -> (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) -(Mod32u x y) -> (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) -(Mod16 x y) -> (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) -(Mod16u x y) -> (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) -(Mod8 x y) -> (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) -(Mod8u x y) -> (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Mod64 x y) => (Select0 (DIVV x y)) +(Mod64u x y) => (Select0 (DIVVU x y)) +(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) +(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) +(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) +(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) // (x + y) / 2 with x>=y -> (x - y) / 2 + y (Avg64u x y) -> (ADDV (SRLVconst (SUBV x y) [1]) y) -(And(64|32|16|8) ...) -> (AND ...) -(Or(64|32|16|8) ...) -> (OR ...) -(Xor(64|32|16|8) ...) -> (XOR ...) +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) // shifts // hardware instruction uses only the low 6 bits of the shift // we compare to 64 to ensure Go semantics for large shifts -(Lsh64x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) -(Lsh64x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh64x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh64x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh64x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh64x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh64x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh64x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Lsh32x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) -(Lsh32x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh32x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh32x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh32x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh32x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh32x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh32x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Lsh16x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) -(Lsh16x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh16x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh16x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh16x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh16x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh16x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh16x8 x y) => 
(AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Lsh8x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) -(Lsh8x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh8x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh8x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh8x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh8x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh8x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh8x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Rsh64Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) -(Rsh64Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) -(Rsh64Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) -(Rsh64Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) +(Rsh64Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) +(Rsh64Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) +(Rsh64Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) +(Rsh64Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) -(Rsh32Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) -(Rsh32Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) -(Rsh32Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) -(Rsh32Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) +(Rsh32Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) +(Rsh32Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Rsh32Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) +(Rsh32Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) -(Rsh16Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) -(Rsh16Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) -(Rsh16Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) -(Rsh16Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) +(Rsh16Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) +(Rsh16Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) +(Rsh16Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Rsh16Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) -(Rsh8Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) -(Rsh8Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) -(Rsh8Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst 
[64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) -(Rsh8Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Rsh8Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) +(Rsh8Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) +(Rsh8Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) +(Rsh8Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) -(Rsh64x64 x y) -> (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) -(Rsh64x32 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) -(Rsh64x16 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) -(Rsh64x8 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) +(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh64x8 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) -(Rsh32x64 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) -(Rsh32x32 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) -(Rsh32x16 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) -(Rsh32x8 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) +(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh32x8 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) -(Rsh16x64 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) -(Rsh16x32 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) -(Rsh16x16 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) -(Rsh16x8 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) +(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh16x8 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) -(Rsh8x64 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) -(Rsh8x32 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) -(Rsh8x16 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) -(Rsh8x8 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) +(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst 
[63]))) y)) +(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh8x8 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) // rotates -(RotateLeft8 x (MOVVconst [c])) -> (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) -(RotateLeft16 x (MOVVconst [c])) -> (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) -(RotateLeft32 x (MOVVconst [c])) -> (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) -(RotateLeft64 x (MOVVconst [c])) -> (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) +(RotateLeft8 x (MOVVconst [c])) => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) +(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) +(RotateLeft32 x (MOVVconst [c])) => (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) +(RotateLeft64 x (MOVVconst [c])) => (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) // unary ops -(Neg(64|32|16|8) ...) -> (NEGV ...) -(Neg(32|64)F ...) -> (NEG(F|D) ...) +(Neg(64|32|16|8) ...) => (NEGV ...) +(Neg(32|64)F ...) => (NEG(F|D) ...) -(Com(64|32|16|8) x) -> (NOR (MOVVconst [0]) x) +(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) -(Sqrt ...) -> (SQRTD ...) +(Sqrt ...) => (SQRTD ...) // boolean ops -- booleans are represented with 0=false, 1=true -(AndB ...) -> (AND ...) -(OrB ...) -> (OR ...) -(EqB x y) -> (XOR (MOVVconst [1]) (XOR x y)) -(NeqB ...) -> (XOR ...) -(Not x) -> (XORconst [1] x) +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XOR (MOVVconst [1]) (XOR x y)) +(NeqB ...) => (XOR ...) +(Not x) => (XORconst [1] x) // constants (Const(64|32|16|8) ...) -> (MOVVconst ...) (Const(32|64)F ...) -> (MOV(F|D)const ...) -(ConstNil) -> (MOVVconst [0]) +(ConstNil) => (MOVVconst [0]) (ConstBool ...) -> (MOVVconst ...) -(Slicemask x) -> (SRAVconst (NEGV x) [63]) +(Slicemask x) => (SRAVconst (NEGV x) [63]) // truncations // Because we ignore high parts of registers, truncates are just copies. -(Trunc16to8 ...) -> (Copy ...) -(Trunc32to8 ...) -> (Copy ...) -(Trunc32to16 ...) -> (Copy ...) -(Trunc64to8 ...) -> (Copy ...) -(Trunc64to16 ...) -> (Copy ...) -(Trunc64to32 ...) -> (Copy ...) +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) // Zero-/Sign-extensions -(ZeroExt8to16 ...) -> (MOVBUreg ...) -(ZeroExt8to32 ...) -> (MOVBUreg ...) -(ZeroExt16to32 ...) -> (MOVHUreg ...) -(ZeroExt8to64 ...) -> (MOVBUreg ...) -(ZeroExt16to64 ...) -> (MOVHUreg ...) -(ZeroExt32to64 ...) -> (MOVWUreg ...) +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) +(ZeroExt8to64 ...) => (MOVBUreg ...) +(ZeroExt16to64 ...) => (MOVHUreg ...) +(ZeroExt32to64 ...) => (MOVWUreg ...) -(SignExt8to16 ...) -> (MOVBreg ...) -(SignExt8to32 ...) -> (MOVBreg ...) -(SignExt16to32 ...) -> (MOVHreg ...) -(SignExt8to64 ...) -> (MOVBreg ...) -(SignExt16to64 ...) -> (MOVHreg ...) -(SignExt32to64 ...) -> (MOVWreg ...) +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) +(SignExt8to64 ...) => (MOVBreg ...) +(SignExt16to64 ...) 
=> (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) // float <-> int conversion -(Cvt32to32F ...) -> (MOVWF ...) -(Cvt32to64F ...) -> (MOVWD ...) -(Cvt64to32F ...) -> (MOVVF ...) -(Cvt64to64F ...) -> (MOVVD ...) -(Cvt32Fto32 ...) -> (TRUNCFW ...) -(Cvt64Fto32 ...) -> (TRUNCDW ...) -(Cvt32Fto64 ...) -> (TRUNCFV ...) -(Cvt64Fto64 ...) -> (TRUNCDV ...) -(Cvt32Fto64F ...) -> (MOVFD ...) -(Cvt64Fto32F ...) -> (MOVDF ...) +(Cvt32to32F ...) => (MOVWF ...) +(Cvt32to64F ...) => (MOVWD ...) +(Cvt64to32F ...) => (MOVVF ...) +(Cvt64to64F ...) => (MOVVD ...) +(Cvt32Fto32 ...) => (TRUNCFW ...) +(Cvt64Fto32 ...) => (TRUNCDW ...) +(Cvt32Fto64 ...) => (TRUNCFV ...) +(Cvt64Fto64 ...) => (TRUNCDV ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) -(CvtBoolToUint8 ...) -> (Copy ...) +(CvtBoolToUint8 ...) => (Copy ...) -(Round(32|64)F ...) -> (Copy ...) +(Round(32|64)F ...) => (Copy ...) // comparisons -(Eq8 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) -(Eq16 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) -(Eq32 x y) -> (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) -(Eq64 x y) -> (SGTU (MOVVconst [1]) (XOR x y)) -(EqPtr x y) -> (SGTU (MOVVconst [1]) (XOR x y)) -(Eq(32|64)F x y) -> (FPFlagTrue (CMPEQ(F|D) x y)) +(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) -(Neq8 x y) -> (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) -(Neq16 x y) -> (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) -(Neq32 x y) -> (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) -(Neq64 x y) -> (SGTU (XOR x y) (MOVVconst [0])) -(NeqPtr x y) -> (SGTU (XOR x y) (MOVVconst [0])) -(Neq(32|64)F x y) -> (FPFlagFalse (CMPEQ(F|D) x y)) +(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) +(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) +(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) +(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0])) +(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0])) +(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) -(Less8 x y) -> (SGT (SignExt8to64 y) (SignExt8to64 x)) -(Less16 x y) -> (SGT (SignExt16to64 y) (SignExt16to64 x)) -(Less32 x y) -> (SGT (SignExt32to64 y) (SignExt32to64 x)) -(Less64 x y) -> (SGT y x) -(Less(32|64)F x y) -> (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN +(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x)) +(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x)) +(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x)) +(Less64 x y) => (SGT y x) +(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN -(Less8U x y) -> (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) -(Less16U x y) -> (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) -(Less32U x y) -> (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) -(Less64U x y) -> (SGTU y x) +(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) +(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) +(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) +(Less64U x y) => (SGTU y x) -(Leq8 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) 
-(Leq16 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) -(Leq32 x y) -> (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) -(Leq64 x y) -> (XOR (MOVVconst [1]) (SGT x y)) -(Leq(32|64)F x y) -> (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN +(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) +(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) +(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) +(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y)) +(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN -(Leq8U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) -(Leq16U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) -(Leq32U x y) -> (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) -(Leq64U x y) -> (XOR (MOVVconst [1]) (SGTU x y)) +(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) (OffPtr [off] ptr:(SP)) -> (MOVVaddr [off] ptr) (OffPtr [off] ptr) -> (ADDVconst [off] ptr) @@ -221,70 +221,70 @@ (LocalAddr {sym} base _) -> (MOVVaddr {sym} base) // loads -(Load ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem) -(Load ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem) -(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem) -(Load ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem) -(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem) -(Load ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem) -(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem) -(Load ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVVload ptr mem) -(Load ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem) -(Load ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem) +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) // stores -(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr 
val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem) // zeroing -(Zero [0] _ mem) -> mem -(Zero [1] ptr mem) -> (MOVBstore ptr (MOVVconst [0]) mem) -(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore ptr (MOVVconst [0]) mem) -(Zero [2] ptr mem) -> +(Zero [2] ptr mem) => (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) -(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => (MOVWstore ptr (MOVVconst [0]) mem) -(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) -(Zero [4] ptr mem) -> +(Zero [4] ptr mem) => (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) -(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 -> +(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => (MOVVstore ptr (MOVVconst [0]) mem) -(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) -(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) -(Zero [3] ptr mem) -> +(Zero [3] ptr mem) => (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))) -(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))) -(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) -(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 -> +(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) -(Zero [24] {t} ptr mem) && t.(*types.Type).Alignment()%8 == 0 -> +(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) @@ -293,70 +293,70 @@ // 8, and 128 are magic constants, see runtime/mkduff.go (Zero [s] {t} ptr mem) && s%8 == 0 && s > 24 && s <= 8*128 - && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice -> + && t.Alignment()%8 == 0 && !config.noDuffDevice => (DUFFZERO [8 * (128 - s/8)] ptr mem) // large or unaligned zeroing uses a loop (Zero [s] {t} ptr mem) - && (s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0 -> - (LoweredZero [t.(*types.Type).Alignment()] + && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 
0 => + (LoweredZero [t.Alignment()] ptr - (ADDVconst ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) + (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) mem) // moves -(Move [0] _ _ mem) -> mem -(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem) -(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore dst (MOVHload src mem) mem) -(Move [2] dst src mem) -> +(Move [2] dst src mem) => (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) -(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => (MOVWstore dst (MOVWload src mem) mem) -(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) -(Move [4] dst src mem) -> +(Move [4] dst src mem) => (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) -(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 -> +(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => (MOVVstore dst (MOVVload src mem) mem) -(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) -(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) -(Move [3] dst src mem) -> +(Move [3] dst src mem) => (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) -(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) -(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) -(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 -> +(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)) -(Move [24] {t} dst src mem) && t.(*types.Type).Alignment()%8 == 0 -> +(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) // medium move uses a duff device (Move [s] {t} dst src mem) - && s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 - && !config.noDuffDevice && logLargeCopy(v, s) -> + && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => (DUFFCOPY [16 * (128 - s/8)] dst src mem) // 16 and 128 are magic constants. 
16 is the number of bytes to encode: // MOVV (R1), R23 @@ -367,17 +367,17 @@ // large or unaligned move uses a loop (Move [s] {t} dst src mem) - && s > 24 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%8 != 0 -> - (LoweredMove [t.(*types.Type).Alignment()] + && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 => + (LoweredMove [t.Alignment()] dst src - (ADDVconst src [s-moveSize(t.(*types.Type).Alignment(), config)]) + (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) // calls -(StaticCall ...) -> (CALLstatic ...) -(ClosureCall ...) -> (CALLclosure ...) -(InterCall ...) -> (CALLinter ...) +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) // atomic intrinsics (AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...) @@ -400,48 +400,48 @@ (AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...) // checks -(NilCheck ...) -> (LoweredNilCheck ...) -(IsNonNil ptr) -> (SGTU ptr (MOVVconst [0])) -(IsInBounds idx len) -> (SGTU len idx) -(IsSliceInBounds idx len) -> (XOR (MOVVconst [1]) (SGTU idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (SGTU ptr (MOVVconst [0])) +(IsInBounds idx len) => (SGTU len idx) +(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len)) // pseudo-ops -(GetClosurePtr ...) -> (LoweredGetClosurePtr ...) -(GetCallerSP ...) -> (LoweredGetCallerSP ...) -(GetCallerPC ...) -> (LoweredGetCallerPC ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) -(If cond yes no) -> (NE cond yes no) +(If cond yes no) => (NE cond yes no) // Write barrier. -(WB ...) -> (LoweredWB ...) +(WB ...) => (LoweredWB ...) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem) -(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) // Optimizations // Absorb boolean tests into block -(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no) -(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no) -(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no) -(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no) -(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no) -(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no) -(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no) -(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no) -(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no) -(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no) -(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no) -(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no) -(NE (SGTUconst [1] x) yes no) -> (EQ x yes no) -(EQ (SGTUconst [1] x) yes no) -> (NE x yes no) -(NE (SGTU x (MOVVconst [0])) yes no) -> (NE x yes no) -(EQ (SGTU x (MOVVconst [0])) yes no) -> (EQ x yes no) -(NE (SGTconst [0] x) yes no) -> (LTZ x yes no) -(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no) -(NE (SGT x (MOVVconst [0])) yes no) -> (GTZ x yes no) -(EQ (SGT x (MOVVconst [0])) yes no) -> (LEZ x yes no) +(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) +(NE (FPFlagFalse cmp) yes no) => (FPF cmp 
yes no) +(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) +(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) +(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no) +(NE (SGTUconst [1] x) yes no) => (EQ x yes no) +(EQ (SGTUconst [1] x) yes no) => (NE x yes no) +(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no) +(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no) +(NE (SGTconst [0] x) yes no) => (LTZ x yes no) +(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) +(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) +(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) // fold offset into address (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) -> (MOVVaddr [off1+off2] {sym} ptr) @@ -509,178 +509,178 @@ (MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // store zero -(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem) -(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem) -(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem) -(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) -> (MOVVstorezero [off] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) +(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem) // don't extend after proper load -(MOVBreg x:(MOVBload _ _)) -> (MOVVreg x) -(MOVBUreg x:(MOVBUload _ _)) -> (MOVVreg x) -(MOVHreg x:(MOVBload _ _)) -> (MOVVreg x) -(MOVHreg x:(MOVBUload _ _)) -> (MOVVreg x) -(MOVHreg x:(MOVHload _ _)) -> (MOVVreg x) -(MOVHUreg x:(MOVBUload _ _)) -> (MOVVreg x) -(MOVHUreg x:(MOVHUload _ _)) -> (MOVVreg x) -(MOVWreg x:(MOVBload _ _)) -> (MOVVreg x) -(MOVWreg x:(MOVBUload _ _)) -> (MOVVreg x) -(MOVWreg x:(MOVHload _ _)) -> (MOVVreg x) -(MOVWreg x:(MOVHUload _ _)) -> (MOVVreg x) -(MOVWreg x:(MOVWload _ _)) -> (MOVVreg x) -(MOVWUreg x:(MOVBUload _ _)) -> (MOVVreg x) -(MOVWUreg x:(MOVHUload _ _)) -> (MOVVreg x) -(MOVWUreg x:(MOVWUload _ _)) -> (MOVVreg x) +(MOVBreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVWload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x) // fold double extensions -(MOVBreg x:(MOVBreg _)) -> (MOVVreg x) -(MOVBUreg x:(MOVBUreg _)) -> (MOVVreg x) -(MOVHreg x:(MOVBreg _)) -> (MOVVreg x) -(MOVHreg x:(MOVBUreg _)) -> (MOVVreg x) -(MOVHreg x:(MOVHreg _)) -> (MOVVreg x) -(MOVHUreg x:(MOVBUreg _)) -> 
(MOVVreg x) -(MOVHUreg x:(MOVHUreg _)) -> (MOVVreg x) -(MOVWreg x:(MOVBreg _)) -> (MOVVreg x) -(MOVWreg x:(MOVBUreg _)) -> (MOVVreg x) -(MOVWreg x:(MOVHreg _)) -> (MOVVreg x) -(MOVWreg x:(MOVWreg _)) -> (MOVVreg x) -(MOVWUreg x:(MOVBUreg _)) -> (MOVVreg x) -(MOVWUreg x:(MOVHUreg _)) -> (MOVVreg x) -(MOVWUreg x:(MOVWUreg _)) -> (MOVVreg x) +(MOVBreg x:(MOVBreg _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVHreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVHreg _)) => (MOVVreg x) +(MOVWreg x:(MOVWreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x) // don't extend before store -(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) -(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) -(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) -(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) -(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) -(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem) -(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) -> (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) // if a register move has only 1 use, just use the same register without emitting instruction // MOVVnop doesn't emit instruction, only for ensuring the type. 
-(MOVVreg x) && x.Uses == 1 -> (MOVVnop x) +(MOVVreg x) && x.Uses == 1 => (MOVVnop x) // fold constant into arithmatic ops -(ADDV x (MOVVconst [c])) && is32Bit(c) -> (ADDVconst [c] x) -(SUBV x (MOVVconst [c])) && is32Bit(c) -> (SUBVconst [c] x) -(AND x (MOVVconst [c])) && is32Bit(c) -> (ANDconst [c] x) -(OR x (MOVVconst [c])) && is32Bit(c) -> (ORconst [c] x) -(XOR x (MOVVconst [c])) && is32Bit(c) -> (XORconst [c] x) -(NOR x (MOVVconst [c])) && is32Bit(c) -> (NORconst [c] x) +(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x) +(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x) +(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x) +(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x) +(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x) +(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x) -(SLLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0]) -(SRLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0]) -(SRAV x (MOVVconst [c])) && uint64(c)>=64 -> (SRAVconst x [63]) -(SLLV x (MOVVconst [c])) -> (SLLVconst x [c]) -(SRLV x (MOVVconst [c])) -> (SRLVconst x [c]) -(SRAV x (MOVVconst [c])) -> (SRAVconst x [c]) +(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63]) +(SLLV x (MOVVconst [c])) => (SLLVconst x [c]) +(SRLV x (MOVVconst [c])) => (SRLVconst x [c]) +(SRAV x (MOVVconst [c])) => (SRAVconst x [c]) -(SGT (MOVVconst [c]) x) && is32Bit(c) -> (SGTconst [c] x) -(SGTU (MOVVconst [c]) x) && is32Bit(c) -> (SGTUconst [c] x) +(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x) +(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x) // mul by constant -(Select1 (MULVU x (MOVVconst [-1]))) -> (NEGV x) -(Select1 (MULVU _ (MOVVconst [0]))) -> (MOVVconst [0]) -(Select1 (MULVU x (MOVVconst [1]))) -> x -(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (SLLVconst [log2(c)] x) +(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x) +(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0]) +(Select1 (MULVU x (MOVVconst [1]))) => x +(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SLLVconst [log2(c)] x) // div by constant -(Select1 (DIVVU x (MOVVconst [1]))) -> x -(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (SRLVconst [log2(c)] x) -(Select0 (DIVVU _ (MOVVconst [1]))) -> (MOVVconst [0]) // mod -(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) -> (ANDconst [c-1] x) // mod +(Select1 (DIVVU x (MOVVconst [1]))) => x +(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SRLVconst [log2(c)] x) +(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod +(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod // generic simplifications -(ADDV x (NEGV y)) -> (SUBV x y) -(SUBV x x) -> (MOVVconst [0]) -(SUBV (MOVVconst [0]) x) -> (NEGV x) -(AND x x) -> x -(OR x x) -> x -(XOR x x) -> (MOVVconst [0]) +(ADDV x (NEGV y)) => (SUBV x y) +(SUBV x x) => (MOVVconst [0]) +(SUBV (MOVVconst [0]) x) => (NEGV x) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVVconst [0]) // remove redundant *const ops -(ADDVconst [0] x) -> x -(SUBVconst [0] x) -> x -(ANDconst [0] _) -> (MOVVconst [0]) -(ANDconst [-1] x) -> x -(ORconst [0] x) -> x -(ORconst [-1] _) -> (MOVVconst [-1]) -(XORconst [0] x) -> x -(XORconst [-1] x) -> (NORconst [0] x) +(ADDVconst [0] x) => x +(SUBVconst [0] x) => x +(ANDconst [0] _) => (MOVVconst [0]) +(ANDconst [-1] x) => x +(ORconst [0] x) => x +(ORconst [-1] _) => 
(MOVVconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)

 // generic constant folding
-(ADDVconst [c] (MOVVconst [d])) -> (MOVVconst [c+d])
-(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) -> (ADDVconst [c+d] x)
-(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) -> (ADDVconst [c-d] x)
-(SUBVconst [c] (MOVVconst [d])) -> (MOVVconst [d-c])
-(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) -> (ADDVconst [-c-d] x)
-(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) -> (ADDVconst [-c+d] x)
-(SLLVconst [c] (MOVVconst [d])) -> (MOVVconst [d<<uint64(c)])
-(SRLVconst [c] (MOVVconst [d])) -> (MOVVconst [int64(uint64(d)>>uint64(c))])
-(SRAVconst [c] (MOVVconst [d])) -> (MOVVconst [d>>uint64(c)])
-(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c*d])
-(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c/d])
-(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(uint64(c)/uint64(d))])
-(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [c%d]) // mod
-(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) -> (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
-(ANDconst [c] (MOVVconst [d])) -> (MOVVconst [c&d])
-(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
-(ORconst [c] (MOVVconst [d])) -> (MOVVconst [c|d])
-(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) -> (ORconst [c|d] x)
-(XORconst [c] (MOVVconst [d])) -> (MOVVconst [c^d])
-(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) -> (XORconst [c^d] x)
-(NORconst [c] (MOVVconst [d])) -> (MOVVconst [^(c|d)])
-(NEGV (MOVVconst [c])) -> (MOVVconst [-c])
-(MOVBreg (MOVVconst [c])) -> (MOVVconst [int64(int8(c))])
-(MOVBUreg (MOVVconst [c])) -> (MOVVconst [int64(uint8(c))])
-(MOVHreg (MOVVconst [c])) -> (MOVVconst [int64(int16(c))])
-(MOVHUreg (MOVVconst [c])) -> (MOVVconst [int64(uint16(c))])
-(MOVWreg (MOVVconst [c])) -> (MOVVconst [int64(int32(c))])
-(MOVWUreg (MOVVconst [c])) -> (MOVVconst [int64(uint32(c))])
-(MOVVreg (MOVVconst [c])) -> (MOVVconst [c])
+(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
+(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
+(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d])
+(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))])
+(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod
+(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) => (MOVVconst [-c])
+(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
+(MOVHreg (MOVVconst [c])) => (MOVVconst
[int64(int16(c))]) +(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))]) +(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) +(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) +(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem) (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem) (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem) (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem) // constant comparisons -(SGTconst [c] (MOVVconst [d])) && c>d -> (MOVVconst [1]) -(SGTconst [c] (MOVVconst [d])) && c<=d -> (MOVVconst [0]) -(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) -> (MOVVconst [1]) -(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) -> (MOVVconst [0]) +(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) +(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0]) // other known comparisons -(SGTconst [c] (MOVBreg _)) && 0x7f < c -> (MOVVconst [1]) -(SGTconst [c] (MOVBreg _)) && c <= -0x80 -> (MOVVconst [0]) -(SGTconst [c] (MOVBUreg _)) && 0xff < c -> (MOVVconst [1]) -(SGTconst [c] (MOVBUreg _)) && c < 0 -> (MOVVconst [0]) -(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) -> (MOVVconst [1]) -(SGTconst [c] (MOVHreg _)) && 0x7fff < c -> (MOVVconst [1]) -(SGTconst [c] (MOVHreg _)) && c <= -0x8000 -> (MOVVconst [0]) -(SGTconst [c] (MOVHUreg _)) && 0xffff < c -> (MOVVconst [1]) -(SGTconst [c] (MOVHUreg _)) && c < 0 -> (MOVVconst [0]) -(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) -> (MOVVconst [1]) -(SGTconst [c] (MOVWUreg _)) && c < 0 -> (MOVVconst [0]) -(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c -> (MOVVconst [1]) -(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) -> (MOVVconst [1]) -(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) -> (MOVVconst [1]) -(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) -> (MOVVconst [1]) +(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1]) +(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0]) +(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1]) +(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0]) +(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0]) +(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1]) +(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) +(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) // absorb constants into branches -(EQ (MOVVconst [0]) yes no) -> (First yes no) -(EQ (MOVVconst [c]) yes no) && c != 0 -> (First no yes) -(NE (MOVVconst [0]) yes no) -> (First no yes) -(NE (MOVVconst 
[c]) yes no) && c != 0 -> (First yes no) -(LTZ (MOVVconst [c]) yes no) && c < 0 -> (First yes no) -(LTZ (MOVVconst [c]) yes no) && c >= 0 -> (First no yes) -(LEZ (MOVVconst [c]) yes no) && c <= 0 -> (First yes no) -(LEZ (MOVVconst [c]) yes no) && c > 0 -> (First no yes) -(GTZ (MOVVconst [c]) yes no) && c > 0 -> (First yes no) -(GTZ (MOVVconst [c]) yes no) && c <= 0 -> (First no yes) -(GEZ (MOVVconst [c]) yes no) && c >= 0 -> (First yes no) -(GEZ (MOVVconst [c]) yes no) && c < 0 -> (First no yes) +(EQ (MOVVconst [0]) yes no) => (First yes no) +(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes) +(NE (MOVVconst [0]) yes no) => (First no yes) +(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes) +(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no) +(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes) +(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no) +(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) +(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) +(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 051de9392e..ae7f496657 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -714,7 +714,7 @@ func rewriteValueMIPS64_OpCom16(v *Value) bool { x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, x) return true } @@ -729,7 +729,7 @@ func rewriteValueMIPS64_OpCom32(v *Value) bool { x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, x) return true } @@ -744,7 +744,7 @@ func rewriteValueMIPS64_OpCom64(v *Value) bool { x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, x) return true } @@ -759,7 +759,7 @@ func rewriteValueMIPS64_OpCom8(v *Value) bool { x := v_0 v.reset(OpMIPS64NOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, x) return true } @@ -769,7 +769,7 @@ func rewriteValueMIPS64_OpConstNil(v *Value) bool { // result: (MOVVconst [0]) for { v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } } @@ -945,7 +945,7 @@ func rewriteValueMIPS64_OpEq16(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(x) @@ -968,7 +968,7 @@ func rewriteValueMIPS64_OpEq32(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) @@ -1007,7 +1007,7 @@ func rewriteValueMIPS64_OpEq64(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1.AddArg2(x, y) v.AddArg2(v0, v1) @@ -1042,7 +1042,7 @@ func rewriteValueMIPS64_OpEq8(v *Value) bool { y := v_1 
v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(x) @@ -1065,7 +1065,7 @@ func rewriteValueMIPS64_OpEqB(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool) v1.AddArg2(x, y) v.AddArg2(v0, v1) @@ -1084,7 +1084,7 @@ func rewriteValueMIPS64_OpEqPtr(v *Value) bool { y := v_1 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v1.AddArg2(x, y) v.AddArg2(v0, v1) @@ -1102,7 +1102,7 @@ func rewriteValueMIPS64_OpHmul32(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRAVconst) - v.AuxInt = 32 + v.AuxInt = int64ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64) v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) @@ -1126,7 +1126,7 @@ func rewriteValueMIPS64_OpHmul32u(v *Value) bool { x := v_0 y := v_1 v.reset(OpMIPS64SRLVconst) - v.AuxInt = 32 + v.AuxInt = int64ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -1196,7 +1196,7 @@ func rewriteValueMIPS64_OpIsNonNil(v *Value) bool { ptr := v_0 v.reset(OpMIPS64SGTU) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg2(ptr, v0) return true } @@ -1213,7 +1213,7 @@ func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool { len := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v1.AddArg2(idx, len) v.AddArg2(v0, v1) @@ -1232,7 +1232,7 @@ func rewriteValueMIPS64_OpLeq16(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v2.AddArg(x) @@ -1255,7 +1255,7 @@ func rewriteValueMIPS64_OpLeq16U(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(x) @@ -1278,7 +1278,7 @@ func rewriteValueMIPS64_OpLeq32(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v2.AddArg(x) @@ -1317,7 +1317,7 @@ func rewriteValueMIPS64_OpLeq32U(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(x) @@ -1340,7 +1340,7 @@ func rewriteValueMIPS64_OpLeq64(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := 
b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v1.AddArg2(x, y) v.AddArg2(v0, v1) @@ -1375,7 +1375,7 @@ func rewriteValueMIPS64_OpLeq64U(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v1.AddArg2(x, y) v.AddArg2(v0, v1) @@ -1394,7 +1394,7 @@ func rewriteValueMIPS64_OpLeq8(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v2.AddArg(x) @@ -1417,7 +1417,7 @@ func rewriteValueMIPS64_OpLeq8U(v *Value) bool { y := v_1 v.reset(OpMIPS64XOR) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(x) @@ -1773,7 +1773,7 @@ func rewriteValueMIPS64_OpLsh16x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -1799,7 +1799,7 @@ func rewriteValueMIPS64_OpLsh16x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -1825,7 +1825,7 @@ func rewriteValueMIPS64_OpLsh16x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) @@ -1849,7 +1849,7 @@ func rewriteValueMIPS64_OpLsh16x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -1875,7 +1875,7 @@ func rewriteValueMIPS64_OpLsh32x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -1901,7 +1901,7 @@ func rewriteValueMIPS64_OpLsh32x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -1927,7 +1927,7 @@ func rewriteValueMIPS64_OpLsh32x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) @@ -1951,7 +1951,7 @@ func rewriteValueMIPS64_OpLsh32x8(v *Value) bool { v0 := 
b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -1977,7 +1977,7 @@ func rewriteValueMIPS64_OpLsh64x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -2003,7 +2003,7 @@ func rewriteValueMIPS64_OpLsh64x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -2029,7 +2029,7 @@ func rewriteValueMIPS64_OpLsh64x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) @@ -2053,7 +2053,7 @@ func rewriteValueMIPS64_OpLsh64x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -2079,7 +2079,7 @@ func rewriteValueMIPS64_OpLsh8x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -2105,7 +2105,7 @@ func rewriteValueMIPS64_OpLsh8x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -2131,7 +2131,7 @@ func rewriteValueMIPS64_OpLsh8x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) @@ -2155,7 +2155,7 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -2178,12 +2178,12 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpMIPS64ADDVconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -2227,7 +2227,7 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { // match: (ADDVconst [0] x) // result: x for { - if v.AuxInt != 0 { + if 
auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -2237,30 +2237,30 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { // match: (ADDVconst [c] (MOVVconst [d])) // result: (MOVVconst [c+d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c + d + v.AuxInt = int64ToAuxInt(c + d) return true } // match: (ADDVconst [c] (ADDVconst [d] x)) // cond: is32Bit(c+d) // result: (ADDVconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64ADDVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(is32Bit(c + d)) { break } v.reset(OpMIPS64ADDVconst) - v.AuxInt = c + d + v.AuxInt = int64ToAuxInt(c + d) v.AddArg(x) return true } @@ -2268,17 +2268,17 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { // cond: is32Bit(c-d) // result: (ADDVconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64SUBVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(is32Bit(c - d)) { break } v.reset(OpMIPS64ADDVconst) - v.AuxInt = c - d + v.AuxInt = int64ToAuxInt(c - d) v.AddArg(x) return true } @@ -2296,12 +2296,12 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpMIPS64ANDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -2324,17 +2324,17 @@ func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool { // match: (ANDconst [0] _) // result: (MOVVconst [0]) for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (ANDconst [-1] x) // result: x for { - if v.AuxInt != -1 { + if auxIntToInt64(v.AuxInt) != -1 { break } x := v_0 @@ -2344,26 +2344,26 @@ func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool { // match: (ANDconst [c] (MOVVconst [d])) // result: (MOVVconst [c&d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c & d + v.AuxInt = int64ToAuxInt(c & d) return true } // match: (ANDconst [c] (ANDconst [d] x)) // result: (ANDconst [c&d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64ANDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpMIPS64ANDconst) - v.AuxInt = c & d + v.AuxInt = int64ToAuxInt(c & d) v.AddArg(x) return true } @@ -2531,9 +2531,9 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(uint8(c)) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) return true } return false @@ -2616,9 +2616,9 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(int8(c)) + v.AuxInt = int64ToAuxInt(int64(int8(c))) return true } return false @@ -2675,24 +2675,24 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) // result: (MOVBstorezero [off] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 v.reset(OpMIPS64MOVBstorezero) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVBreg { break @@ -2700,16 +2700,16 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVBUreg { break @@ -2717,16 +2717,16 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVHreg { break @@ -2734,16 +2734,16 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVHUreg { break @@ -2751,16 +2751,16 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVWreg { break @@ -2768,16 +2768,16 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVWUreg { break @@ -2785,8 +2785,8 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -3138,9 +3138,9 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := 
auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(uint16(c)) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) return true } return false @@ -3267,9 +3267,9 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int64ToAuxInt(int64(int16(c))) return true } return false @@ -3326,24 +3326,24 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) // result: (MOVHstorezero [off] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 v.reset(OpMIPS64MOVHstorezero) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) // result: (MOVHstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVHreg { break @@ -3351,16 +3351,16 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) // result: (MOVHstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVHUreg { break @@ -3368,16 +3368,16 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) // result: (MOVHstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVWreg { break @@ -3385,16 +3385,16 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) // result: (MOVHstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVWUreg { break @@ -3402,8 +3402,8 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -3525,9 +3525,9 @@ func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) return true } return false @@ -3584,16 +3584,16 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) // result: (MOVVstorezero [off] {sym} ptr mem) for { - off := v.AuxInt - 
sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 v.reset(OpMIPS64MOVVstorezero) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -3769,9 +3769,9 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(uint32(c)) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) return true } return false @@ -3931,9 +3931,9 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int64ToAuxInt(int64(int32(c))) return true } return false @@ -3990,24 +3990,24 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) // result: (MOVWstorezero [off] {sym} ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 v.reset(OpMIPS64MOVWstorezero) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) // result: (MOVWstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVWreg { break @@ -4015,16 +4015,16 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVWstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) // result: (MOVWstore [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpMIPS64MOVWUreg { break @@ -4032,8 +4032,8 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { x := v_1.Args[0] mem := v_2 v.reset(OpMIPS64MOVWstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -4095,9 +4095,9 @@ func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = -c + v.AuxInt = int64ToAuxInt(-c) return true } return false @@ -4114,12 +4114,12 @@ func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpMIPS64NORconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4132,13 +4132,13 @@ func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool { // match: (NORconst [c] (MOVVconst [d])) // result: (MOVVconst [^(c|d)]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = ^(c | d) + v.AuxInt = int64ToAuxInt(^(c | d)) return true } return 
false @@ -4155,12 +4155,12 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpMIPS64ORconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4183,7 +4183,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { // match: (ORconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -4193,40 +4193,40 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { // match: (ORconst [-1] _) // result: (MOVVconst [-1]) for { - if v.AuxInt != -1 { + if auxIntToInt64(v.AuxInt) != -1 { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (ORconst [c] (MOVVconst [d])) // result: (MOVVconst [c|d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c | d + v.AuxInt = int64ToAuxInt(c | d) return true } // match: (ORconst [c] (ORconst [d] x)) // cond: is32Bit(c|d) // result: (ORconst [c|d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64ORconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(is32Bit(c | d)) { break } v.reset(OpMIPS64ORconst) - v.AuxInt = c | d + v.AuxInt = int64ToAuxInt(c | d) v.AddArg(x) return true } @@ -4242,13 +4242,13 @@ func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { break } v.reset(OpMIPS64SGTconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4264,13 +4264,13 @@ func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool { if v_0.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { break } v.reset(OpMIPS64SGTUconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4282,88 +4282,88 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool { // cond: uint64(c)>uint64(d) // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) if !(uint64(c) > uint64(d)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTUconst [c] (MOVVconst [d])) // cond: uint64(c)<=uint64(d) // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) if !(uint64(c) <= uint64(d)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTUconst [c] (MOVBUreg _)) // cond: 0xff < uint64(c) // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTUconst [c] (MOVHUreg _)) // cond: 0xffff < uint64(c) // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTUconst [c] (ANDconst [m] _)) // cond: uint64(m) < uint64(c) // result: 
(MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64ANDconst { break } - m := v_0.AuxInt + m := auxIntToInt64(v_0.AuxInt) if !(uint64(m) < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTUconst [c] (SRLVconst _ [d])) // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64SRLVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } return false @@ -4374,172 +4374,172 @@ func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value) bool { // cond: c>d // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) if !(c > d) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTconst [c] (MOVVconst [d])) // cond: c<=d // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) if !(c <= d) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTconst [c] (MOVBreg _)) // cond: 0x7f < c // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTconst [c] (MOVBreg _)) // cond: c <= -0x80 // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTconst [c] (MOVBUreg _)) // cond: 0xff < c // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTconst [c] (MOVBUreg _)) // cond: c < 0 // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTconst [c] (MOVHreg _)) // cond: 0x7fff < c // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTconst [c] (MOVHreg _)) // cond: c <= -0x8000 // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTconst [c] (MOVHUreg _)) // cond: 0xffff < c // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTconst [c] (MOVHUreg _)) // cond: c < 0 // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != 
OpMIPS64MOVHUreg || !(c < 0) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTconst [c] (MOVWUreg _)) // cond: c < 0 // result: (MOVVconst [0]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SGTconst [c] (ANDconst [m] _)) // cond: 0 <= m && m < c // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64ANDconst { break } - m := v_0.AuxInt + m := auxIntToInt64(v_0.AuxInt) if !(0 <= m && m < c) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } // match: (SGTconst [c] (SRLVconst _ [d])) // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) // result: (MOVVconst [1]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64SRLVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) return true } return false @@ -4554,12 +4554,12 @@ func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 64) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SLLV x (MOVVconst [c])) @@ -4569,9 +4569,9 @@ func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpMIPS64SLLVconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4582,13 +4582,13 @@ func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value) bool { // match: (SLLVconst [c] (MOVVconst [d])) // result: (MOVVconst [d<= 64) { break } v.reset(OpMIPS64SRAVconst) - v.AuxInt = 63 + v.AuxInt = int64ToAuxInt(63) v.AddArg(x) return true } @@ -4620,9 +4620,9 @@ func rewriteValueMIPS64_OpMIPS64SRAV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpMIPS64SRAVconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4633,13 +4633,13 @@ func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value) bool { // match: (SRAVconst [c] (MOVVconst [d])) // result: (MOVVconst [d>>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = d >> uint64(c) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) return true } return false @@ -4654,12 +4654,12 @@ func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 64) { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SRLV x (MOVVconst [c])) @@ -4669,9 +4669,9 @@ func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpMIPS64SRLVconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4682,13 +4682,13 @@ func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value) bool { // match: (SRLVconst [c] (MOVVconst [d])) // result: (MOVVconst 
[int64(uint64(d)>>uint64(c))]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(uint64(d) >> uint64(c)) + v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) return true } return false @@ -4704,12 +4704,12 @@ func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { break } v.reset(OpMIPS64SUBVconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4721,13 +4721,13 @@ func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SUBV (MOVVconst [0]) x) // result: (NEGV x) for { - if v_0.Op != OpMIPS64MOVVconst || v_0.AuxInt != 0 { + if v_0.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_1 @@ -4742,7 +4742,7 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { // match: (SUBVconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -4752,30 +4752,30 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { // match: (SUBVconst [c] (MOVVconst [d])) // result: (MOVVconst [d-c]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = d - c + v.AuxInt = int64ToAuxInt(d - c) return true } // match: (SUBVconst [c] (SUBVconst [d] x)) // cond: is32Bit(-c-d) // result: (ADDVconst [-c-d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64SUBVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(is32Bit(-c - d)) { break } v.reset(OpMIPS64ADDVconst) - v.AuxInt = -c - d + v.AuxInt = int64ToAuxInt(-c - d) v.AddArg(x) return true } @@ -4783,17 +4783,17 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { // cond: is32Bit(-c+d) // result: (ADDVconst [-c+d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64ADDVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(is32Bit(-c + d)) { break } v.reset(OpMIPS64ADDVconst) - v.AuxInt = -c + d + v.AuxInt = int64ToAuxInt(-c + d) v.AddArg(x) return true } @@ -4811,12 +4811,12 @@ func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(is32Bit(c)) { continue } v.reset(OpMIPS64XORconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -4830,7 +4830,7 @@ func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } return false @@ -4840,7 +4840,7 @@ func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool { // match: (XORconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -4850,42 +4850,42 @@ func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool { // match: (XORconst [-1] x) // result: (NORconst [0] x) for { - if v.AuxInt != -1 { + if auxIntToInt64(v.AuxInt) != -1 { break } x := v_0 v.reset(OpMIPS64NORconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) v.AddArg(x) return true } // match: (XORconst [c] (MOVVconst [d])) // result: (MOVVconst [c^d]) for { - c := v.AuxInt + c := 
auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c ^ d + v.AuxInt = int64ToAuxInt(c ^ d) return true } // match: (XORconst [c] (XORconst [d] x)) // cond: is32Bit(c^d) // result: (XORconst [c^d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64XORconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(is32Bit(c ^ d)) { break } v.reset(OpMIPS64XORconst) - v.AuxInt = c ^ d + v.AuxInt = int64ToAuxInt(c ^ d) v.AddArg(x) return true } @@ -5061,7 +5061,7 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { // match: (Move [0] _ _ mem) // result: mem for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } mem := v_2 @@ -5071,7 +5071,7 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { // match: (Move [1] dst src mem) // result: (MOVBstore dst (MOVBload src mem) mem) for { - if v.AuxInt != 1 { + if auxIntToInt64(v.AuxInt) != 1 { break } dst := v_0 @@ -5084,17 +5084,17 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [2] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore dst (MOVHload src mem) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) @@ -5106,16 +5106,16 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { // match: (Move [2] dst src mem) // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) @@ -5125,17 +5125,17 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [4] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore dst (MOVWload src mem) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPS64MOVWstore) @@ -5145,23 +5145,23 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [4] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v0.AuxInt = 2 + v0.AuxInt = int32ToAuxInt(2) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) @@ -5173,26 +5173,26 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { // match: 
(Move [4] dst src mem) // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = 3 + v.AuxInt = int32ToAuxInt(3) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v0.AuxInt = 3 + v0.AuxInt = int32ToAuxInt(3) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AuxInt = 2 + v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v2.AuxInt = 2 + v2.AuxInt = int32ToAuxInt(2) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v3.AuxInt = 1 + v3.AuxInt = int32ToAuxInt(1) v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v4.AuxInt = 1 + v4.AuxInt = int32ToAuxInt(1) v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) @@ -5204,17 +5204,17 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [8] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%8 == 0 + // cond: t.Alignment()%8 == 0 // result: (MOVVstore dst (MOVVload src mem) mem) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%8 == 0) { + if !(t.Alignment()%8 == 0) { break } v.reset(OpMIPS64MOVVstore) @@ -5224,23 +5224,23 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [8] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPS64MOVWstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v0.AuxInt = 4 + v0.AuxInt = int32ToAuxInt(4) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) @@ -5250,33 +5250,33 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [8] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = 6 + v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v0.AuxInt = 6 + v0.AuxInt = int32ToAuxInt(6) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v2.AuxInt = 4 + v2.AuxInt = int32ToAuxInt(4) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v3.AuxInt = 2 + v3.AuxInt = int32ToAuxInt(2) v4 := b.NewValue0(v.Pos, 
OpMIPS64MOVHload, typ.Int16) - v4.AuxInt = 2 + v4.AuxInt = int32ToAuxInt(2) v4.AddArg2(src, mem) v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) @@ -5290,21 +5290,21 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { // match: (Move [3] dst src mem) // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) for { - if v.AuxInt != 3 { + if auxIntToInt64(v.AuxInt) != 3 { break } dst := v_0 src := v_1 mem := v_2 v.reset(OpMIPS64MOVBstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v0.AuxInt = 2 + v0.AuxInt = int32ToAuxInt(2) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AuxInt = 1 + v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) - v2.AuxInt = 1 + v2.AuxInt = int32ToAuxInt(1) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) @@ -5315,28 +5315,28 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [6] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) for { - if v.AuxInt != 6 { + if auxIntToInt64(v.AuxInt) != 6 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v0.AuxInt = 4 + v0.AuxInt = int32ToAuxInt(4) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AuxInt = 2 + v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) - v2.AuxInt = 2 + v2.AuxInt = int32ToAuxInt(2) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) @@ -5347,28 +5347,28 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [12] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) for { - if v.AuxInt != 12 { + if auxIntToInt64(v.AuxInt) != 12 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPS64MOVWstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v0.AuxInt = 8 + v0.AuxInt = int32ToAuxInt(8) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) - v2.AuxInt = 4 + v2.AuxInt = int32ToAuxInt(4) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) @@ -5379,23 +5379,23 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [16] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%8 == 0 + // cond: t.Alignment()%8 == 0 // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst 
(MOVVload src mem) mem)) for { - if v.AuxInt != 16 { + if auxIntToInt64(v.AuxInt) != 16 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%8 == 0) { + if !(t.Alignment()%8 == 0) { break } v.reset(OpMIPS64MOVVstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v0.AuxInt = 8 + v0.AuxInt = int32ToAuxInt(8) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) @@ -5405,28 +5405,28 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [24] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%8 == 0 + // cond: t.Alignment()%8 == 0 // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) for { - if v.AuxInt != 24 { + if auxIntToInt64(v.AuxInt) != 24 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%8 == 0) { + if !(t.Alignment()%8 == 0) { break } v.reset(OpMIPS64MOVVstore) - v.AuxInt = 16 + v.AuxInt = int32ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v0.AuxInt = 16 + v0.AuxInt = int32ToAuxInt(16) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) - v2.AuxInt = 8 + v2.AuxInt = int32ToAuxInt(8) v2.AddArg2(src, mem) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) @@ -5437,38 +5437,38 @@ func rewriteValueMIPS64_OpMove(v *Value) bool { return true } // match: (Move [s] {t} dst src mem) - // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { break } v.reset(OpMIPS64DUFFCOPY) - v.AuxInt = 16 * (128 - s/8) + v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) v.AddArg3(dst, src, mem) return true } // match: (Move [s] {t} dst src mem) - // cond: s > 24 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%8 != 0 - // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDVconst src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem) + // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 + // result: (LoweredMove [t.Alignment()] dst src (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(s > 24 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%8 != 0) { + if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) { break } v.reset(OpMIPS64LoweredMove) - v.AuxInt = t.(*types.Type).Alignment() + v.AuxInt = int64ToAuxInt(t.Alignment()) v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AuxInt = int64ToAuxInt(s 
- moveSize(t.Alignment(), config)) v0.AddArg(src) v.AddArg4(dst, src, v0, mem) return true @@ -5561,7 +5561,7 @@ func rewriteValueMIPS64_OpNeq16(v *Value) bool { v2.AddArg(y) v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = 0 + v3.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, v3) return true } @@ -5584,7 +5584,7 @@ func rewriteValueMIPS64_OpNeq32(v *Value) bool { v2.AddArg(y) v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = 0 + v3.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, v3) return true } @@ -5619,7 +5619,7 @@ func rewriteValueMIPS64_OpNeq64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v1.AuxInt = 0 + v1.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, v1) return true } @@ -5658,7 +5658,7 @@ func rewriteValueMIPS64_OpNeq8(v *Value) bool { v2.AddArg(y) v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = 0 + v3.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, v3) return true } @@ -5677,7 +5677,7 @@ func rewriteValueMIPS64_OpNeqPtr(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v1.AuxInt = 0 + v1.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, v1) return true } @@ -5689,7 +5689,7 @@ func rewriteValueMIPS64_OpNot(v *Value) bool { for { x := v_0 v.reset(OpMIPS64XORconst) - v.AuxInt = 1 + v.AuxInt = int64ToAuxInt(1) v.AddArg(x) return true } @@ -5728,7 +5728,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 0 // result: (LoweredPanicBoundsA [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -5736,7 +5736,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { break } v.reset(OpMIPS64LoweredPanicBoundsA) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -5744,7 +5744,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 1 // result: (LoweredPanicBoundsB [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -5752,7 +5752,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { break } v.reset(OpMIPS64LoweredPanicBoundsB) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -5760,7 +5760,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { // cond: boundsABI(kind) == 2 // result: (LoweredPanicBoundsC [kind] x y mem) for { - kind := v.AuxInt + kind := auxIntToInt64(v.AuxInt) x := v_0 y := v_1 mem := v_2 @@ -5768,7 +5768,7 @@ func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { break } v.reset(OpMIPS64LoweredPanicBoundsC) - v.AuxInt = kind + v.AuxInt = int64ToAuxInt(kind) v.AddArg3(x, y, mem) return true } @@ -5787,15 +5787,15 @@ func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr16) v0 := b.NewValue0(v.Pos, OpLsh16x64, t) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v1.AuxInt = c & 15 + v1.AuxInt = int64ToAuxInt(c & 15) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = -c & 15 + v3.AuxInt = int64ToAuxInt(-c & 15) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -5815,15 +5815,15 @@ func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool { if v_1.Op != 
OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr32) v0 := b.NewValue0(v.Pos, OpLsh32x64, t) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v1.AuxInt = c & 31 + v1.AuxInt = int64ToAuxInt(c & 31) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = -c & 31 + v3.AuxInt = int64ToAuxInt(-c & 31) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -5843,15 +5843,15 @@ func rewriteValueMIPS64_OpRotateLeft64(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr64) v0 := b.NewValue0(v.Pos, OpLsh64x64, t) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v1.AuxInt = c & 63 + v1.AuxInt = int64ToAuxInt(c & 63) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = -c & 63 + v3.AuxInt = int64ToAuxInt(-c & 63) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -5871,15 +5871,15 @@ func rewriteValueMIPS64_OpRotateLeft8(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpLsh8x64, t) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v1.AuxInt = c & 7 + v1.AuxInt = int64ToAuxInt(c & 7) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = -c & 7 + v3.AuxInt = int64ToAuxInt(-c & 7) v2.AddArg2(x, v3) v.AddArg2(v0, v2) return true @@ -5901,7 +5901,7 @@ func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -5929,7 +5929,7 @@ func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -5957,7 +5957,7 @@ func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) @@ -5983,7 +5983,7 @@ func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6016,7 +6016,7 @@ func rewriteValueMIPS64_OpRsh16x16(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6044,7 +6044,7 @@ func rewriteValueMIPS64_OpRsh16x32(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) 
v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6070,7 +6070,7 @@ func rewriteValueMIPS64_OpRsh16x64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v4.AuxInt = 63 + v4.AuxInt = int64ToAuxInt(63) v3.AddArg2(y, v4) v2.AddArg(v3) v1.AddArg2(v2, y) @@ -6098,7 +6098,7 @@ func rewriteValueMIPS64_OpRsh16x8(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6121,7 +6121,7 @@ func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6149,7 +6149,7 @@ func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6177,7 +6177,7 @@ func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) @@ -6203,7 +6203,7 @@ func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6236,7 +6236,7 @@ func rewriteValueMIPS64_OpRsh32x16(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6264,7 +6264,7 @@ func rewriteValueMIPS64_OpRsh32x32(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6290,7 +6290,7 @@ func rewriteValueMIPS64_OpRsh32x64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v4.AuxInt = 63 + v4.AuxInt = int64ToAuxInt(63) v3.AddArg2(y, v4) v2.AddArg(v3) v1.AddArg2(v2, y) @@ -6318,7 +6318,7 @@ func rewriteValueMIPS64_OpRsh32x8(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6341,7 +6341,7 @@ func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = 
int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6367,7 +6367,7 @@ func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6393,7 +6393,7 @@ func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) @@ -6417,7 +6417,7 @@ func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6446,7 +6446,7 @@ func rewriteValueMIPS64_OpRsh64x16(v *Value) bool { v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v4.AuxInt = 63 + v4.AuxInt = int64ToAuxInt(63) v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg2(v1, v3) @@ -6472,7 +6472,7 @@ func rewriteValueMIPS64_OpRsh64x32(v *Value) bool { v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v4.AuxInt = 63 + v4.AuxInt = int64ToAuxInt(63) v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg2(v1, v3) @@ -6496,7 +6496,7 @@ func rewriteValueMIPS64_OpRsh64x64(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v3.AuxInt = 63 + v3.AuxInt = int64ToAuxInt(63) v2.AddArg2(y, v3) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -6522,7 +6522,7 @@ func rewriteValueMIPS64_OpRsh64x8(v *Value) bool { v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v4.AuxInt = 63 + v4.AuxInt = int64ToAuxInt(63) v2.AddArg2(v3, v4) v1.AddArg(v2) v0.AddArg2(v1, v3) @@ -6545,7 +6545,7 @@ func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6573,7 +6573,7 @@ func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6601,7 +6601,7 @@ func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v1.AddArg2(v2, y) v0.AddArg(v1) v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) @@ -6627,7 +6627,7 @@ func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool { v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := 
b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 64 + v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v1.AddArg2(v2, v3) @@ -6660,7 +6660,7 @@ func rewriteValueMIPS64_OpRsh8x16(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6688,7 +6688,7 @@ func rewriteValueMIPS64_OpRsh8x32(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6714,7 +6714,7 @@ func rewriteValueMIPS64_OpRsh8x64(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v4.AuxInt = 63 + v4.AuxInt = int64ToAuxInt(63) v3.AddArg2(y, v4) v2.AddArg(v3) v1.AddArg2(v2, y) @@ -6742,7 +6742,7 @@ func rewriteValueMIPS64_OpRsh8x8(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v5.AuxInt = 63 + v5.AuxInt = int64ToAuxInt(63) v3.AddArg2(v4, v5) v2.AddArg(v3) v1.AddArg2(v2, v4) @@ -6777,11 +6777,11 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { } _ = v_0.Args[1] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { break } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (Select0 (DIVVU x (MOVVconst [c]))) @@ -6797,12 +6797,12 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { if v_0_1.Op != OpMIPS64MOVVconst { break } - c := v_0_1.AuxInt + c := auxIntToInt64(v_0_1.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpMIPS64ANDconst) - v.AuxInt = c - 1 + v.AuxInt = int64ToAuxInt(c - 1) v.AddArg(x) return true } @@ -6817,14 +6817,14 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { if v_0_0.Op != OpMIPS64MOVVconst { break } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) v_0_1 := v_0.Args[1] if v_0_1.Op != OpMIPS64MOVVconst { break } - d := v_0_1.AuxInt + d := auxIntToInt64(v_0_1.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c % d + v.AuxInt = int64ToAuxInt(c % d) return true } // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) @@ -6838,14 +6838,14 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { if v_0_0.Op != OpMIPS64MOVVconst { break } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) v_0_1 := v_0.Args[1] if v_0_1.Op != OpMIPS64MOVVconst { break } - d := v_0_1.AuxInt + d := auxIntToInt64(v_0_1.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(uint64(c) % uint64(d)) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) return true } return false @@ -6884,7 +6884,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { x := v_0_0 - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != -1 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 { continue } v.reset(OpMIPS64NEGV) @@ -6903,11 +6903,11 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - if 
v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { continue } v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } break @@ -6923,7 +6923,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { x := v_0_0 - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { continue } v.copyOf(x) @@ -6946,12 +6946,12 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_1.Op != OpMIPS64MOVVconst { continue } - c := v_0_1.AuxInt + c := auxIntToInt64(v_0_1.AuxInt) if !(isPowerOfTwo(c)) { continue } v.reset(OpMIPS64SLLVconst) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg(x) return true } @@ -6966,7 +6966,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 1 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { break } v.copyOf(x) @@ -6985,12 +6985,12 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_1.Op != OpMIPS64MOVVconst { break } - c := v_0_1.AuxInt + c := auxIntToInt64(v_0_1.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpMIPS64SRLVconst) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg(x) return true } @@ -7007,13 +7007,13 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_0.Op != OpMIPS64MOVVconst { continue } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) if v_0_1.Op != OpMIPS64MOVVconst { continue } - d := v_0_1.AuxInt + d := auxIntToInt64(v_0_1.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c * d + v.AuxInt = int64ToAuxInt(c * d) return true } break @@ -7029,14 +7029,14 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_0.Op != OpMIPS64MOVVconst { break } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) v_0_1 := v_0.Args[1] if v_0_1.Op != OpMIPS64MOVVconst { break } - d := v_0_1.AuxInt + d := auxIntToInt64(v_0_1.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = c / d + v.AuxInt = int64ToAuxInt(c / d) return true } // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) @@ -7050,14 +7050,14 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { if v_0_0.Op != OpMIPS64MOVVconst { break } - c := v_0_0.AuxInt + c := auxIntToInt64(v_0_0.AuxInt) v_0_1 := v_0.Args[1] if v_0_1.Op != OpMIPS64MOVVconst { break } - d := v_0_1.AuxInt + d := auxIntToInt64(v_0_1.AuxInt) v.reset(OpMIPS64MOVVconst) - v.AuxInt = int64(uint64(c) / uint64(d)) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) return true } return false @@ -7071,7 +7071,7 @@ func rewriteValueMIPS64_OpSlicemask(v *Value) bool { t := v.Type x := v_0 v.reset(OpMIPS64SRAVconst) - v.AuxInt = 63 + v.AuxInt = int64ToAuxInt(63) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v0.AddArg(x) v.AddArg(v0) @@ -7083,14 +7083,14 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 1 + // cond: t.Size() == 1 // result: (MOVBstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 1) { + if !(t.Size() == 1) { break } v.reset(OpMIPS64MOVBstore) @@ -7098,14 +7098,14 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 2 + // 
cond: t.Size() == 2 // result: (MOVHstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 2) { + if !(t.Size() == 2) { break } v.reset(OpMIPS64MOVHstore) @@ -7113,14 +7113,14 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // cond: t.Size() == 4 && !is32BitFloat(val.Type) // result: (MOVWstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { + if !(t.Size() == 4 && !is32BitFloat(val.Type)) { break } v.reset(OpMIPS64MOVWstore) @@ -7128,14 +7128,14 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) + // cond: t.Size() == 8 && !is64BitFloat(val.Type) // result: (MOVVstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { + if !(t.Size() == 8 && !is64BitFloat(val.Type)) { break } v.reset(OpMIPS64MOVVstore) @@ -7143,14 +7143,14 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) + // cond: t.Size() == 4 && is32BitFloat(val.Type) // result: (MOVFstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + if !(t.Size() == 4 && is32BitFloat(val.Type)) { break } v.reset(OpMIPS64MOVFstore) @@ -7158,14 +7158,14 @@ func rewriteValueMIPS64_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // cond: t.Size() == 8 && is64BitFloat(val.Type) // result: (MOVDstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + if !(t.Size() == 8 && is64BitFloat(val.Type)) { break } v.reset(OpMIPS64MOVDstore) @@ -7183,7 +7183,7 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { // match: (Zero [0] _ mem) // result: mem for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } mem := v_1 @@ -7193,92 +7193,92 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { // match: (Zero [1] ptr mem) // result: (MOVBstore ptr (MOVVconst [0]) mem) for { - if v.AuxInt != 1 { + if auxIntToInt64(v.AuxInt) != 1 { break } ptr := v_0 mem := v_1 v.reset(OpMIPS64MOVBstore) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore ptr (MOVVconst [0]) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } 
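For ops whose aux is a type rather than an integer (Store and Zero here), the conversion swaps the explicit assertion t.(*types.Type) for auxToType, so conditions read t.Size() or t.Alignment() directly. Note also that two AuxInt widths meet in the Zero expansions: the Zero op's byte count is read as an int64, the offsets on the stores it expands into are written as int32, and the zero constant is again an int64. A condensed sketch of the Zero [2] case in that style, paraphrasing the generated code that continues below rather than quoting it:

    // (Zero [2] ptr mem) => (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
    if auxIntToInt64(v.AuxInt) != 2 { // Zero size: 64-bit aux
        break
    }
    v.reset(OpMIPS64MOVBstore)
    v.AuxInt = int32ToAuxInt(1) // store offset: 32-bit aux
    zero := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
    zero.AuxInt = int64ToAuxInt(0) // constant value: 64-bit aux
    inner := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
    inner.AuxInt = int32ToAuxInt(0)
    inner.AddArg3(ptr, zero, mem)
    v.AddArg3(ptr, zero, inner)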
ptr := v_0 mem := v_1 v.reset(OpMIPS64MOVBstore) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore ptr (MOVVconst [0]) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPS64MOVWstore) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true @@ -7286,21 +7286,21 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { // match: (Zero [4] ptr mem) // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } ptr := v_0 mem := v_1 v.reset(OpMIPS64MOVBstore) - v.AuxInt = 3 + v.AuxInt = int32ToAuxInt(3) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AuxInt = 2 + v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v2.AuxInt = 1 + v2.AuxInt = int32ToAuxInt(1) v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v3.AddArg3(ptr, v0, mem) v2.AddArg3(ptr, v0, v3) v1.AddArg3(ptr, v0, v2) @@ -7308,70 +7308,70 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { return true } // match: (Zero [8] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%8 == 0 + // cond: t.Alignment()%8 == 0 // result: (MOVVstore ptr (MOVVconst [0]) mem) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%8 == 0) { + if !(t.Alignment()%8 == 0) { break } v.reset(OpMIPS64MOVVstore) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [8] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if 
!(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPS64MOVWstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = 6 + v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v2.AuxInt = 2 + v2.AuxInt = int32ToAuxInt(2) v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v3.AddArg3(ptr, v0, mem) v2.AddArg3(ptr, v0, v3) v1.AddArg3(ptr, v0, v2) @@ -7381,156 +7381,156 @@ func rewriteValueMIPS64_OpZero(v *Value) bool { // match: (Zero [3] ptr mem) // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))) for { - if v.AuxInt != 3 { + if auxIntToInt64(v.AuxInt) != 3 { break } ptr := v_0 mem := v_1 v.reset(OpMIPS64MOVBstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v1.AuxInt = 1 + v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))) for { - if v.AuxInt != 6 { + if auxIntToInt64(v.AuxInt) != 6 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v1.AuxInt = 2 + v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) for { - if v.AuxInt != 12 { + if auxIntToInt64(v.AuxInt) != 12 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - 
if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPS64MOVWstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%8 == 0 + // cond: t.Alignment()%8 == 0 // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) for { - if v.AuxInt != 16 { + if auxIntToInt64(v.AuxInt) != 16 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%8 == 0) { + if !(t.Alignment()%8 == 0) { break } v.reset(OpMIPS64MOVVstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [24] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%8 == 0 + // cond: t.Alignment()%8 == 0 // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) for { - if v.AuxInt != 24 { + if auxIntToInt64(v.AuxInt) != 24 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%8 == 0) { + if !(t.Alignment()%8 == 0) { break } v.reset(OpMIPS64MOVVstore) - v.AuxInt = 16 + v.AuxInt = int32ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v0.AuxInt = 0 + v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v2.AddArg3(ptr, v0, mem) v1.AddArg3(ptr, v0, v2) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [s] {t} ptr mem) - // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice + // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(*types.Type).Alignment()%8 == 0 && !config.noDuffDevice) { + if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { break } v.reset(OpMIPS64DUFFZERO) - v.AuxInt = 8 * (128 - s/8) + v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) v.AddArg2(ptr, mem) return true } // match: (Zero [s] {t} ptr mem) - // cond: (s > 8*128 || config.noDuffDevice) || t.(*types.Type).Alignment()%8 != 0 - // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDVconst ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem) + // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 + // result: (LoweredZero [t.Alignment()] ptr (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !((s > 8*128 || config.noDuffDevice) || 
t.(*types.Type).Alignment()%8 != 0) { + if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) { break } v.reset(OpMIPS64LoweredZero) - v.AuxInt = t.(*types.Type).Alignment() + v.AuxInt = int64ToAuxInt(t.Alignment()) v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) v0.AddArg(ptr) v.AddArg3(ptr, v0, mem) return true @@ -7560,7 +7560,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7574,7 +7574,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7588,7 +7588,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7602,7 +7602,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7616,7 +7616,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (NE x yes no) for b.Controls[0].Op == OpMIPS64SGTUconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } x := v_0.Args[0] @@ -7630,7 +7630,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { break } b.resetWithControl(BlockMIPS64EQ, x) @@ -7640,7 +7640,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (GEZ x yes no) for b.Controls[0].Op == OpMIPS64SGTconst { v_0 := b.Controls[0] - if v_0.AuxInt != 0 { + if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] @@ -7654,7 +7654,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { break } b.resetWithControl(BlockMIPS64LEZ, x) @@ -7664,7 +7664,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First yes no) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - if v_0.AuxInt != 0 { + if auxIntToInt64(v_0.AuxInt) != 0 { break } b.Reset(BlockFirst) @@ -7675,7 +7675,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First no yes) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c != 0) { break } @@ -7689,7 +7689,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First yes no) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c >= 0) { break } @@ -7701,7 +7701,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First no yes) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c < 0) { break } @@ -7715,7 +7715,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First yes no) for b.Controls[0].Op == OpMIPS64MOVVconst { 
v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c > 0) { break } @@ -7727,7 +7727,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First no yes) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c <= 0) { break } @@ -7749,7 +7749,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First yes no) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c <= 0) { break } @@ -7761,7 +7761,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First no yes) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c > 0) { break } @@ -7775,7 +7775,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First yes no) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c < 0) { break } @@ -7787,7 +7787,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First no yes) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c >= 0) { break } @@ -7816,7 +7816,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (EQ cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7830,7 +7830,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (EQ cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7844,7 +7844,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (EQ cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7858,7 +7858,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (EQ cmp yes no) for b.Controls[0].Op == OpMIPS64XORconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } cmp := v_0.Args[0] @@ -7872,7 +7872,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (EQ x yes no) for b.Controls[0].Op == OpMIPS64SGTUconst { v_0 := b.Controls[0] - if v_0.AuxInt != 1 { + if auxIntToInt64(v_0.AuxInt) != 1 { break } x := v_0.Args[0] @@ -7886,7 +7886,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { break } b.resetWithControl(BlockMIPS64NE, x) @@ -7896,7 +7896,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (LTZ x yes no) for b.Controls[0].Op == OpMIPS64SGTconst { v_0 := b.Controls[0] - if v_0.AuxInt != 0 { + if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] @@ -7910,7 +7910,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst || v_0_1.AuxInt != 0 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { break } b.resetWithControl(BlockMIPS64GTZ, x) @@ -7920,7 +7920,7 @@ func rewriteBlockMIPS64(b *Block) bool { // result: (First no yes) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - if v_0.AuxInt != 0 { + if auxIntToInt64(v_0.AuxInt) != 0 { break } b.Reset(BlockFirst) @@ -7932,7 +7932,7 @@ func 
rewriteBlockMIPS64(b *Block) bool { // result: (First yes no) for b.Controls[0].Op == OpMIPS64MOVVconst { v_0 := b.Controls[0] - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if !(c != 0) { break }
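The block rewrites that close the file follow the same pattern: when a conditional block's control is a MOVVconst, its AuxInt is read through auxIntToInt64 and the branch is folded into a First (unconditional) block, swapping successors when the condition is statically false. A minimal sketch of that folding for an NE block, paraphrasing the cases above rather than quoting any one of them; swapSuccessors stands in for whatever helper the generated (First no yes) results use to flip the edges:

    // Fold (NE (MOVVconst [c]) yes no) once c is known.
    for b.Controls[0].Op == OpMIPS64MOVVconst {
        c := auxIntToInt64(b.Controls[0].AuxInt)
        b.Reset(BlockFirst) // branch becomes unconditional
        if c == 0 {
            b.swapSuccessors() // condition statically false: take the "no" edge
        }
        return true
    }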