diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index e32c42d630..7e18def938 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -155,12 +155,6 @@
 // The following reduction shows up frequently too. e.g b[(x>>14)&0xFF]
 (CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)

-// large constant shifts
-((Lsh64|Rsh64U)x64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
-((Lsh32|Rsh32U)x64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
-((Lsh16|Rsh16U)x64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
-((Lsh8|Rsh8U)x64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
-
 // large constant signed right shift, we leave the sign bit
 (Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
 (Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
@@ -175,13 +169,6 @@
 ((Rsh8|Rsh8U)x64 x (MOVDconst [c])) && uint64(c) < 8 => (SR(AW|W)const ((Sign|Zero)Ext8to32 x) [c])
 (Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])

-((Lsh64|Rsh64|Rsh64U)x32 x (MOVDconst [c])) && uint32(c) < 64 => (S(L|RA|R)Dconst x [c&63])
-((Lsh32|Rsh32|Rsh32U)x32 x (MOVDconst [c])) && uint32(c) < 32 => (S(L|RA|R)Wconst x [c&31])
-(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c&15])
-(Rsh(16|16U)x32 x (MOVDconst [c])) && uint32(c) < 16 => (S(RA|R)Wconst ((Sign|Zero)Ext16to32 x) [c&15])
-(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SLWconst x [c&7])
-(Rsh(8|8U)x32 x (MOVDconst [c])) && uint32(c) < 8 => (S(RA|R)Wconst ((Sign|Zero)Ext8to32 x) [c&7])
-
 // Lower bounded shifts first. No need to check shift value.
 (Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
 (Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
@@ -197,27 +184,8 @@
 (Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)

 // non-constant rotates
-// These are subexpressions found in statements that can become rotates
-// In these cases the shift count is known to be < 64 so the more complicated expressions
-// with Mask & Carry is not needed
-((Lsh64|Rsh64U|Rsh64)x64 x (AND y (MOVDconst [63]))) => (S(L|R|RA)D x (Select0 (ANDCCconst [63] y)))
-(Lsh64x64 x (Select0 (ANDCCconst [63] y))) => (SLD x (Select0 (ANDCCconst [63] y)))
-((Rsh64U|Rsh64)x64 x (Select0 (ANDCCconst [63] y))) => (S(R|RA)D x (Select0 (ANDCCconst [63] y)))
-((Rsh64U|Rsh64)x64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) => (SR(D|AD) x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))
-((Rsh64U|Rsh64)x64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) => (SR(D|AD) x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))
-((Rsh64U|Rsh64)x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SR(D|AD) x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))
-((Rsh64U|Rsh64)x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SR(D|AD) x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))
-
+// If shift > 64 then use -1 as shift count to shift all bits.
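(Editorial aside, not part of the patch: the context rule immediately below is the generic lowering that the deleted special cases fed into. The following is a minimal Go sketch of the language-level behaviour that lowering has to reproduce, assuming only the spec-defined shift semantics and the fact that the PPC64 doubleword shift instructions treat a count of -1, whose low seven bits are all ones, as shifting every bit out.)

package main

import "fmt"

func main() {
	var x uint64 = 1
	var s int64 = -8

	for _, c := range []uint64{1, 63, 64, 100} {
		// For c >= 64 the three results are 0, 0 and -1, which is what the
		// (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))) operand
		// reproduces by substituting -1 as the hardware shift count whenever
		// y is not below 64.
		fmt.Println(x<<c, x>>c, s>>c)
	}
}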
 ((Lsh64|Rsh64|Rsh64U)x64 x y) => (S(L|RA|R)D x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
-
-((Lsh32|Rsh32|Rsh32U)x64 x (AND y (MOVDconst [31]))) => (S(L|RA|R)W x (Select0 (ANDCCconst [31] y)))
-(Lsh32x64 x (Select0 (ANDCCconst [31] y))) => (SLW x (Select0 (ANDCCconst [31] y)))
-((Rsh32|Rsh32U)x64 x (Select0 (ANDCCconst [31] y))) => (S(RA|R)W x (Select0 (ANDCCconst [31] y)))
-(Rsh(32|32U)x64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) => (SR(AW|W) x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))
-(Rsh(32|32U)x64 x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) => (SR(AW|W) x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))
-(Rsh(32|32U)x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SR(AW|W) x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))
-(Rsh(32|32U)x64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SR(AW|W) x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))
-
 ((Rsh32|Rsh32U|Lsh32)x64 x y) => (S(RA|R|L)W x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
 (Rsh(16|16U)x64 x y) => (SR(AW|W) ((Sign|Zero)Ext16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))

diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 8d0caf833b..d3cf9646b7 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -2406,23 +2406,6 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
-	// match: (Lsh16x32 x (MOVDconst [c]))
-	// cond: uint32(c) < 16
-	// result: (SLWconst x [c&15])
-	for {
-		x := v_0
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := auxIntToInt64(v_1.AuxInt)
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = int64ToAuxInt(c & 15)
-		v.AddArg(x)
-		return true
-	}
 	// match: (Lsh16x32 x y)
 	// cond: shiftIsBounded(v)
 	// result: (SLW x y)
@@ -2460,21 +2443,6 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
-	// match: (Lsh16x64 _ (MOVDconst [c]))
-	// cond: uint64(c) >= 16
-	// result: (MOVDconst [0])
-	for {
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := auxIntToInt64(v_1.AuxInt)
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = int64ToAuxInt(0)
-		return true
-	}
 	// match: (Lsh16x64 x (MOVDconst [c]))
 	// cond: uint64(c) < 16
 	// result: (SLWconst x [c])
@@ -2607,23 +2575,6 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
-	// match: (Lsh32x32 x (MOVDconst [c]))
-	// cond: uint32(c) < 32
-	// result: (SLWconst x [c&31])
-	for {
-		x := v_0
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := auxIntToInt64(v_1.AuxInt)
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = int64ToAuxInt(c & 31)
-		v.AddArg(x)
-		return true
-	}
 	// match: (Lsh32x32 x y)
 	// cond: shiftIsBounded(v)
 	// result: (SLW x y)
@@ -2661,21 +2612,6 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	typ := &b.Func.Config.Types
-	// match: (Lsh32x64 _ (MOVDconst [c]))
-	// cond: uint64(c) >= 32
-	// result: (MOVDconst [0])
-	for {
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := auxIntToInt64(v_1.AuxInt)
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = int64ToAuxInt(0)
-		return true
-	}
 	// match: (Lsh32x64 x (MOVDconst [c]))
 	// cond: uint64(c) < 32
 	// result: (SLWconst x [c])
@@ -2706,53 +2642,6 @@ func
rewriteValuePPC64_OpLsh32x64(v *Value) bool { v.AddArg2(x, y) return true } - // match: (Lsh32x64 x (AND y (MOVDconst [31]))) - // result: (SLW x (Select0 (ANDCCconst [31] y))) - for { - x := v_0 - if v_1.Op != OpPPC64AND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { - y := v_1_0 - if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 { - continue - } - v.reset(OpPPC64SLW) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Lsh32x64 x (Select0 (ANDCCconst [31] y))) - // result: (SLW x (Select0 (ANDCCconst [31] y))) - for { - x := v_0 - if v_1.Op != OpSelect0 || v_1.Type != typ.Int32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_0.AuxInt) != 31 { - break - } - y := v_1_0.Args[0] - v.reset(OpPPC64SLW) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } // match: (Lsh32x64 x y) // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) for { @@ -2855,23 +2744,6 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh64x32 x (MOVDconst [c])) - // cond: uint32(c) < 64 - // result: (SLDconst x [c&63]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SLDconst) - v.AuxInt = int64ToAuxInt(c & 63) - v.AddArg(x) - return true - } // match: (Lsh64x32 x y) // cond: shiftIsBounded(v) // result: (SLD x y) @@ -2909,21 +2781,6 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh64x64 _ (MOVDconst [c])) - // cond: uint64(c) >= 64 - // result: (MOVDconst [0]) - for { - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint64(c) >= 64) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Lsh64x64 x (MOVDconst [c])) // cond: uint64(c) < 64 // result: (SLDconst x [c]) @@ -2954,53 +2811,6 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { v.AddArg2(x, y) return true } - // match: (Lsh64x64 x (AND y (MOVDconst [63]))) - // result: (SLD x (Select0 (ANDCCconst [63] y))) - for { - x := v_0 - if v_1.Op != OpPPC64AND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { - y := v_1_0 - if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SLD) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Lsh64x64 x (Select0 (ANDCCconst [63] y))) - // result: (SLD x (Select0 (ANDCCconst [63] y))) - for { - x := v_0 - if v_1.Op != OpSelect0 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.Int64 || auxIntToInt64(v_1_0.AuxInt) != 63 { - break - } - y := 
v_1_0.Args[0] - v.reset(OpPPC64SLD) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } // match: (Lsh64x64 x y) // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) for { @@ -3103,23 +2913,6 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh8x32 x (MOVDconst [c])) - // cond: uint32(c) < 8 - // result: (SLWconst x [c&7]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SLWconst) - v.AuxInt = int64ToAuxInt(c & 7) - v.AddArg(x) - return true - } // match: (Lsh8x32 x y) // cond: shiftIsBounded(v) // result: (SLW x y) @@ -3157,21 +2950,6 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Lsh8x64 _ (MOVDconst [c])) - // cond: uint64(c) >= 8 - // result: (MOVDconst [0]) - for { - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint64(c) >= 8) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Lsh8x64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SLWconst x [c]) @@ -13842,25 +13620,6 @@ func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16Ux32 x (MOVDconst [c])) - // cond: uint32(c) < 16 - // result: (SRWconst (ZeroExt16to32 x) [c&15]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = int64ToAuxInt(c & 15) - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16Ux32 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVHZreg x) y) @@ -13902,21 +13661,6 @@ func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 16 - // result: (MOVDconst [0]) - for { - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint64(c) >= 16) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Rsh16Ux64 x (MOVDconst [c])) // cond: uint64(c) < 16 // result: (SRWconst (ZeroExt16to32 x) [c]) @@ -14063,25 +13807,6 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh16x32 x (MOVDconst [c])) - // cond: uint32(c) < 16 - // result: (SRAWconst (SignExt16to32 x) [c&15]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 16) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = int64ToAuxInt(c & 15) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16x32 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVHreg x) y) @@ -14284,23 +14009,6 @@ func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32Ux32 x (MOVDconst [c])) - // cond: uint32(c) < 32 - // result: (SRWconst x [c&31]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := 
auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = int64ToAuxInt(c & 31) - v.AddArg(x) - return true - } // match: (Rsh32Ux32 x y) // cond: shiftIsBounded(v) // result: (SRW x y) @@ -14338,21 +14046,6 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 32 - // result: (MOVDconst [0]) - for { - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint64(c) >= 32) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Rsh32Ux64 x (MOVDconst [c])) // cond: uint64(c) < 32 // result: (SRWconst x [c]) @@ -14383,187 +14076,6 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v.AddArg2(x, y) return true } - // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) - // result: (SRW x (Select0 (ANDCCconst [31] y))) - for { - x := v_0 - if v_1.Op != OpPPC64AND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { - y := v_1_0 - if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 { - continue - } - v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh32Ux64 x (Select0 (ANDCCconst [31] y))) - // result: (SRW x (Select0 (ANDCCconst [31] y))) - for { - x := v_0 - if v_1.Op != OpSelect0 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { - break - } - y := v_1_0.Args[0] - v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) - // result: (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSelect0 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 { - break - } - y := v_1_1_0.Args[0] - v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(31) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - // match: (Rsh32Ux64 x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) - // result: (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSelect0 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || 
auxIntToInt64(v_1_0_0.AuxInt) != 31 { - break - } - y := v_1_0_0.Args[0] - v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) - // result: (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { - y := v_1_1_0 - if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 { - continue - } - v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(31) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh32Ux64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) - // result: (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - v_1_0_1 := v_1_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { - y := v_1_0_0 - if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 { - continue - } - v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } // match: (Rsh32Ux64 x y) // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) for { @@ -14666,23 +14178,6 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh32x32 x (MOVDconst [c])) - // cond: uint32(c) < 32 - // result: (SRAWconst x [c&31]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 32) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = int64ToAuxInt(c & 31) - v.AddArg(x) - return true - } // match: (Rsh32x32 x y) // cond: shiftIsBounded(v) // result: (SRAW x y) @@ -14767,187 +14262,6 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v.AddArg2(x, y) return true } - // match: (Rsh32x64 x (AND y (MOVDconst [31]))) - // result: (SRAW x (Select0 (ANDCCconst [31] y))) - for { - x := v_0 - if v_1.Op != OpPPC64AND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i0 := 0; _i0 <= 
1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { - y := v_1_0 - if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 { - continue - } - v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh32x64 x (Select0 (ANDCCconst [31] y))) - // result: (SRAW x (Select0 (ANDCCconst [31] y))) - for { - x := v_0 - if v_1.Op != OpSelect0 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { - break - } - y := v_1_0.Args[0] - v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh32x64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) - // result: (SRAW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSelect0 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 { - break - } - y := v_1_1_0.Args[0] - v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(31) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - // match: (Rsh32x64 x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) - // result: (SRAW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSelect0 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 31 { - break - } - y := v_1_0_0.Args[0] - v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) - // result: (SRAW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { - y := v_1_1_0 - if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) 
!= 31 { - continue - } - v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(31) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh32x64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) - // result: (SRAW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - v_1_0_1 := v_1_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { - y := v_1_0_0 - if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 { - continue - } - v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } // match: (Rsh32x64 x y) // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) for { @@ -15050,23 +14364,6 @@ func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64Ux32 x (MOVDconst [c])) - // cond: uint32(c) < 64 - // result: (SRDconst x [c&63]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SRDconst) - v.AuxInt = int64ToAuxInt(c & 63) - v.AddArg(x) - return true - } // match: (Rsh64Ux32 x y) // cond: shiftIsBounded(v) // result: (SRD x y) @@ -15104,21 +14401,6 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 64 - // result: (MOVDconst [0]) - for { - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint64(c) >= 64) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Rsh64Ux64 x (MOVDconst [c])) // cond: uint64(c) < 64 // result: (SRDconst x [c]) @@ -15149,187 +14431,6 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v.AddArg2(x, y) return true } - // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) - // result: (SRD x (Select0 (ANDCCconst [63] y))) - for { - x := v_0 - if v_1.Op != OpPPC64AND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { - y := v_1_0 - if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh64Ux64 x (Select0 (ANDCCconst [63] y))) - // result: (SRD x (Select0 (ANDCCconst [63] y))) - for { - x := v_0 - if v_1.Op != OpSelect0 { - break - } - 
v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { - break - } - y := v_1_0.Args[0] - v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) - // result: (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSelect0 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 { - break - } - y := v_1_1_0.Args[0] - v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(63) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - // match: (Rsh64Ux64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) - // result: (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSelect0 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 63 { - break - } - y := v_1_0_0.Args[0] - v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) - // result: (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { - y := v_1_1_0 - if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(63) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh64Ux64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) - // result: (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) 
- for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - v_1_0_1 := v_1_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { - y := v_1_0_0 - if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } // match: (Rsh64Ux64 x y) // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) for { @@ -15432,23 +14533,6 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh64x32 x (MOVDconst [c])) - // cond: uint32(c) < 64 - // result: (SRADconst x [c&63]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 64) { - break - } - v.reset(OpPPC64SRADconst) - v.AuxInt = int64ToAuxInt(c & 63) - v.AddArg(x) - return true - } // match: (Rsh64x32 x y) // cond: shiftIsBounded(v) // result: (SRAD x y) @@ -15533,187 +14617,6 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v.AddArg2(x, y) return true } - // match: (Rsh64x64 x (AND y (MOVDconst [63]))) - // result: (SRAD x (Select0 (ANDCCconst [63] y))) - for { - x := v_0 - if v_1.Op != OpPPC64AND { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { - y := v_1_0 - if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh64x64 x (Select0 (ANDCCconst [63] y))) - // result: (SRAD x (Select0 (ANDCCconst [63] y))) - for { - x := v_0 - if v_1.Op != OpSelect0 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { - break - } - y := v_1_0.Args[0] - v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh64x64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) - // result: (SRAD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpSelect0 { - break - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 { - break - } - y := v_1_1_0.Args[0] - v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, 
OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(63) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - // match: (Rsh64x64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) - // result: (SRAD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpSelect0 { - break - } - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 63 { - break - } - y := v_1_0_0.Args[0] - v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) - // result: (SRAD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { - break - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 { - break - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt { - break - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - v_1_1_1 := v_1_1.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 { - y := v_1_1_0 - if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) - v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v3.AuxInt = int64ToAuxInt(63) - v3.AddArg(y) - v2.AddArg(v3) - v0.AddArg2(v1, v2) - v.AddArg2(x, v0) - return true - } - break - } - // match: (Rsh64x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) - // result: (SRAD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) - for { - x := v_0 - if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt { - break - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - v_1_0_1 := v_1_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { - y := v_1_0_0 - if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 { - continue - } - v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) - v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg2(x, v0) - return true - } - break - } // match: (Rsh64x64 x y) // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) for { @@ -15820,25 +14723,6 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
(Rsh8Ux32 x (MOVDconst [c])) - // cond: uint32(c) < 8 - // result: (SRWconst (ZeroExt8to32 x) [c&7]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SRWconst) - v.AuxInt = int64ToAuxInt(c & 7) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8Ux32 x y) // cond: shiftIsBounded(v) // result: (SRW (MOVBZreg x) y) @@ -15880,21 +14764,6 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 8 - // result: (MOVDconst [0]) - for { - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint64(c) >= 8) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } // match: (Rsh8Ux64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) @@ -16041,25 +14910,6 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Rsh8x32 x (MOVDconst [c])) - // cond: uint32(c) < 8 - // result: (SRAWconst (SignExt8to32 x) [c&7]) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - if !(uint32(c) < 8) { - break - } - v.reset(OpPPC64SRAWconst) - v.AuxInt = int64ToAuxInt(c & 7) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8x32 x y) // cond: shiftIsBounded(v) // result: (SRAW (MOVBreg x) y)
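(Editorial aside, not part of the patch: a small Go sketch of the sub-word shift semantics that the surviving 8-bit constant rules still have to honour; the helper names below are made up for illustration. Narrow operands are widened first, mirroring the (Sign|Zero)Ext8to32 wrappers in the kept rules, and counts of eight or more saturate to zero for the unsigned form and to the sign bit for the signed form.)

package main

import "fmt"

// rsh8U and rsh8S mirror the Go-level behaviour the 8-bit lowerings implement:
// counts >= 8 yield 0 for the unsigned shift and the sign fill for the signed
// one. These function names are illustrative, not compiler code.
func rsh8U(x uint8, c uint64) uint8 { return x >> c }
func rsh8S(x int8, c uint64) int8   { return x >> c }

func main() {
	fmt.Println(rsh8U(0x80, 3), rsh8U(0x80, 8)) // 16 0
	fmt.Println(rsh8S(-128, 3), rsh8S(-128, 8)) // -16 -1
}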