diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 21dbc6238c..6557287caa 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -62,7 +62,7 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { } for i := len(b.Values) - 1; i >= 0; i-- { v := b.Values[i] - if flive && (v.Op == ssa.OpAMD64MOVBconst || v.Op == ssa.OpAMD64MOVWconst || v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) { + if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) { // The "mark" is any non-nil Aux value. v.Aux = v } @@ -160,7 +160,7 @@ func opregreg(op obj.As, dest, src int16) *obj.Prog { func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { - case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL, ssa.OpAMD64ADDW, ssa.OpAMD64ADDB: + case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL: r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) @@ -193,12 +193,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = r } // 2-address opcode arithmetic - case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, ssa.OpAMD64SUBW, ssa.OpAMD64SUBB, - ssa.OpAMD64MULQ, ssa.OpAMD64MULL, ssa.OpAMD64MULW, ssa.OpAMD64MULB, - ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, ssa.OpAMD64ANDW, ssa.OpAMD64ANDB, - ssa.OpAMD64ORQ, ssa.OpAMD64ORL, ssa.OpAMD64ORW, ssa.OpAMD64ORB, - ssa.OpAMD64XORQ, ssa.OpAMD64XORL, ssa.OpAMD64XORW, ssa.OpAMD64XORB, - ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHLW, ssa.OpAMD64SHLB, + case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, + ssa.OpAMD64MULQ, ssa.OpAMD64MULL, + ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, + ssa.OpAMD64ORQ, ssa.OpAMD64ORL, + ssa.OpAMD64XORQ, ssa.OpAMD64XORL, + ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB, ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB, ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD, @@ -335,7 +335,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst, ssa.OpAMD64ADDWconst, ssa.OpAMD64ADDBconst: + case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst: r := gc.SSARegNum(v) a := gc.SSARegNum(v.Args[0]) if r == a { @@ -408,7 +408,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst, ssa.OpAMD64MULWconst, ssa.OpAMD64MULBconst: + case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst: r := gc.SSARegNum(v) if r != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not in same register %s", v.LongString()) @@ -424,11 +424,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { //p.From3.Type = obj.TYPE_REG //p.From3.Reg = gc.SSARegNum(v.Args[0]) - case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, ssa.OpAMD64SUBWconst, ssa.OpAMD64SUBBconst, - ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, ssa.OpAMD64ANDWconst, ssa.OpAMD64ANDBconst, - ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, ssa.OpAMD64ORWconst, ssa.OpAMD64ORBconst, - ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, ssa.OpAMD64XORWconst, ssa.OpAMD64XORBconst, - ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHLWconst, ssa.OpAMD64SHLBconst, + case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, + ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, + ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, + ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, + ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, 
ssa.OpAMD64SHRBconst, ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst: @@ -497,7 +497,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v.Args[0]) - case ssa.OpAMD64MOVBconst, ssa.OpAMD64MOVWconst, ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: + case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: x := gc.SSARegNum(v) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -812,9 +812,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } - case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64NEGW, ssa.OpAMD64NEGB, + case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, - ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL, ssa.OpAMD64NOTW, ssa.OpAMD64NOTB: + ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL: r := gc.SSARegNum(v) if r != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not in same register %s", v.LongString()) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 3cdac6f416..c0e83d7adc 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -6,23 +6,23 @@ (Add64 x y) -> (ADDQ x y) (AddPtr x y) -> (ADDQ x y) (Add32 x y) -> (ADDL x y) -(Add16 x y) -> (ADDW x y) -(Add8 x y) -> (ADDB x y) +(Add16 x y) -> (ADDL x y) +(Add8 x y) -> (ADDL x y) (Add32F x y) -> (ADDSS x y) (Add64F x y) -> (ADDSD x y) (Sub64 x y) -> (SUBQ x y) (SubPtr x y) -> (SUBQ x y) (Sub32 x y) -> (SUBL x y) -(Sub16 x y) -> (SUBW x y) -(Sub8 x y) -> (SUBB x y) +(Sub16 x y) -> (SUBL x y) +(Sub8 x y) -> (SUBL x y) (Sub32F x y) -> (SUBSS x y) (Sub64F x y) -> (SUBSD x y) (Mul64 x y) -> (MULQ x y) (Mul32 x y) -> (MULL x y) -(Mul16 x y) -> (MULW x y) -(Mul8 x y) -> (MULB x y) +(Mul16 x y) -> (MULL x y) +(Mul8 x y) -> (MULL x y) (Mul32F x y) -> (MULSS x y) (Mul64F x y) -> (MULSD x y) @@ -60,30 +60,30 @@ (And64 x y) -> (ANDQ x y) (And32 x y) -> (ANDL x y) -(And16 x y) -> (ANDW x y) -(And8 x y) -> (ANDB x y) +(And16 x y) -> (ANDL x y) +(And8 x y) -> (ANDL x y) (Or64 x y) -> (ORQ x y) (Or32 x y) -> (ORL x y) -(Or16 x y) -> (ORW x y) -(Or8 x y) -> (ORB x y) +(Or16 x y) -> (ORL x y) +(Or8 x y) -> (ORL x y) (Xor64 x y) -> (XORQ x y) (Xor32 x y) -> (XORL x y) -(Xor16 x y) -> (XORW x y) -(Xor8 x y) -> (XORB x y) +(Xor16 x y) -> (XORL x y) +(Xor8 x y) -> (XORL x y) (Neg64 x) -> (NEGQ x) (Neg32 x) -> (NEGL x) -(Neg16 x) -> (NEGW x) -(Neg8 x) -> (NEGB x) +(Neg16 x) -> (NEGL x) +(Neg8 x) -> (NEGL x) (Neg32F x) -> (PXOR x (MOVSSconst [f2i(math.Copysign(0, -1))])) (Neg64F x) -> (PXOR x (MOVSDconst [f2i(math.Copysign(0, -1))])) (Com64 x) -> (NOTQ x) (Com32 x) -> (NOTL x) -(Com16 x) -> (NOTW x) -(Com8 x) -> (NOTB x) +(Com16 x) -> (NOTL x) +(Com8 x) -> (NOTL x) // CMPQconst 0 below is redundant because BSF sets Z but how to remove? 
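The Add16/Add8 (and Sub/Mul/And/Or/Xor/Neg/Com) lowerings above are the heart of this CL: narrow operations are mapped onto their 32-bit counterparts. That is sound because, for add, subtract, multiply, and the bitwise ops, the low n bits of the result depend only on the low n bits of the operands; shifts, divisions, and comparisons are the exceptions and keep width-specific handling. A standalone sanity check of that arithmetic fact (my own sketch, not code from this CL):

```go
package main

import "fmt"

func main() {
	// For every pair of bytes, compute x OP y natively in 8 bits and
	// again in a 32-bit "register" whose upper 24 bits hold garbage,
	// then compare the low 8 bits. This is why ADDB can become ADDL,
	// MULB can become MULL, and so on.
	const junk = 0xa5a5a500 // garbage above bit 7, zeros in the low byte
	for x := 0; x < 256; x++ {
		for y := 0; y < 256; y++ {
			x8, y8 := uint8(x), uint8(y)
			x32, y32 := uint32(x)|junk, uint32(y)|junk
			check := func(op string, want uint8, got uint32) {
				if want != uint8(got) {
					fmt.Printf("mismatch: %d %s %d\n", x, op, y)
				}
			}
			check("+", x8+y8, x32+y32)
			check("-", x8-y8, x32-y32)
			check("*", x8*y8, x32*y32)
			check("&", x8&y8, x32&y32)
			check("|", x8|y8, x32|y32)
			check("^", x8^y8, x32^y32)
		}
	}
	fmt.Println("low 8 bits agree for all two-operand ALU ops")
}
```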
(Ctz64 x) -> (CMOVQEQconst (BSFQ x) (CMPQconst x [0]) [64]) @@ -169,15 +169,15 @@ (Lsh32x16 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) (Lsh32x8 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) -(Lsh16x64 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPQconst y [16]))) -(Lsh16x32 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPLconst y [16]))) -(Lsh16x16 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPWconst y [16]))) -(Lsh16x8 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPBconst y [16]))) +(Lsh16x64 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) +(Lsh16x32 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) +(Lsh16x16 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) +(Lsh16x8 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) -(Lsh8x64 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPQconst y [8]))) -(Lsh8x32 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPLconst y [8]))) -(Lsh8x16 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPWconst y [8]))) -(Lsh8x8 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPBconst y [8]))) +(Lsh8x64 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) +(Lsh8x32 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) +(Lsh8x16 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) +(Lsh8x8 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) (Lrot64 x [c]) -> (ROLQconst [c&63] x) (Lrot32 x [c]) -> (ROLLconst [c&31] x) @@ -194,38 +194,38 @@ (Rsh32Ux16 x y) -> (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) (Rsh32Ux8 x y) -> (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) -(Rsh16Ux64 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) -(Rsh16Ux32 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) -(Rsh16Ux16 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) -(Rsh16Ux8 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) +(Rsh16Ux64 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) +(Rsh16Ux32 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) +(Rsh16Ux16 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) +(Rsh16Ux8 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) -(Rsh8Ux64 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) -(Rsh8Ux32 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) -(Rsh8Ux16 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) -(Rsh8Ux8 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) +(Rsh8Ux64 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) +(Rsh8Ux32 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) +(Rsh8Ux16 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) +(Rsh8Ux8 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) // Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. // We implement this by setting the shift value to -1 (all ones) if the shift value is >= width. // Note: for small shift widths we generate 32 bits of mask even when we don't need it all. 
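Two things changed in the shift rules. For the unsigned left shifts just above, the compare bound for the 16- and 8-bit cases moved from the type width to 32: SHLL uses its count mod 32, and any count in [width, 32) already clears the low 16 (or 8) bits on its own, so the mask only has to cut in at 32. The signed right shifts below instead saturate the count: when it is at least the width, ORing with the inverted borrow mask turns the count into all ones, and a SARW/SARB by 31 yields pure sign bits, the 0/-1 the language requires. A small simulation of the 16-bit signed case (my own check of the semantics, not code from the CL):

```go
package main

import "fmt"

// sarw models x86 SARW: arithmetic right shift of a 16-bit value that
// uses only the low 5 bits of the count, so counts 16-31 give pure
// sign fill.
func sarw(x int16, count uint32) int16 { return x >> (count & 31) }

func main() {
	for _, x := range []int16{-32768, -1234, -1, 0, 1, 0x7fff} {
		for y := uint32(0); y < 100; y++ {
			// (Rsh16x16 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))):
			// mask is all ones when y < 16, so ORing with its NOT either
			// leaves y alone or saturates the count to all ones.
			mask := uint32(0)
			if uint16(y) < 16 {
				mask = ^uint32(0)
			}
			got := sarw(x, y|^mask)
			want := x >> 15 // Go semantics: shifts >= width sign-fill
			if y < 16 {
				want = x >> y
			}
			if got != want {
				fmt.Printf("mismatch: x=%d y=%d got=%d want=%d\n", x, y, got, want)
			}
		}
	}
	fmt.Println("Rsh16x16 lowering matches Go shift semantics")
}
```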
(Rsh64x64 x y) -> (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) (Rsh64x32 x y) -> (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) -(Rsh64x16 x y) -> (SARQ x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) -(Rsh64x8 x y) -> (SARQ x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) +(Rsh64x16 x y) -> (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) +(Rsh64x8 x y) -> (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) (Rsh32x64 x y) -> (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) (Rsh32x32 x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) -(Rsh32x16 x y) -> (SARL x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) -(Rsh32x8 x y) -> (SARL x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) +(Rsh32x16 x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) +(Rsh32x8 x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) (Rsh16x64 x y) -> (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) (Rsh16x32 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) -(Rsh16x16 x y) -> (SARW x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) -(Rsh16x8 x y) -> (SARW x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) +(Rsh16x16 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) +(Rsh16x8 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) (Rsh8x64 x y) -> (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) (Rsh8x32 x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) -(Rsh8x16 x y) -> (SARB x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) -(Rsh8x8 x y) -> (SARB x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) +(Rsh8x16 x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) +(Rsh8x8 x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) (Less64 x y) -> (SETL (CMPQ x y)) (Less32 x y) -> (SETL (CMPL x y)) @@ -366,19 +366,19 @@ (Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 -> (REPMOVSQ dst src (MOVQconst [size/8]) mem) -(Not x) -> (XORBconst [1] x) +(Not x) -> (XORLconst [1] x) (OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr) (OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr) -(Const8 [val]) -> (MOVBconst [val]) -(Const16 [val]) -> (MOVWconst [val]) +(Const8 [val]) -> (MOVLconst [val]) +(Const16 [val]) -> (MOVLconst [val]) (Const32 [val]) -> (MOVLconst [val]) (Const64 [val]) -> (MOVQconst [val]) (Const32F [val]) -> (MOVSSconst [val]) (Const64F [val]) -> (MOVSDconst [val]) (ConstNil) -> (MOVQconst [0]) -(ConstBool [b]) -> (MOVBconst [b]) +(ConstBool [b]) -> (MOVLconst [b]) (Addr {sym} base) -> (LEAQ {sym} base) @@ -439,44 +439,22 @@ (ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x) (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x) (ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x) -(ADDW x (MOVWconst [c])) -> (ADDWconst [c] x) -(ADDW (MOVWconst [c]) x) -> (ADDWconst [c] x) -(ADDB x (MOVBconst [c])) -> (ADDBconst [c] x) -(ADDB (MOVBconst [c]) x) -> (ADDBconst [c] x) (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c]) (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst x [c])) (SUBL x (MOVLconst [c])) -> (SUBLconst x [c]) (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst x [c])) -(SUBW x (MOVWconst [c])) -> (SUBWconst x [c]) -(SUBW (MOVWconst [c]) x) -> (NEGW (SUBWconst x [c])) -(SUBB x (MOVBconst [c])) -> (SUBBconst x [c]) -(SUBB (MOVBconst [c]) x) -> (NEGB (SUBBconst x [c])) (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x) (MULQ (MOVQconst [c]) x) && 
is32Bit(c) -> (MULQconst [c] x) (MULL x (MOVLconst [c])) -> (MULLconst [c] x) (MULL (MOVLconst [c]) x) -> (MULLconst [c] x) -(MULW x (MOVWconst [c])) -> (MULWconst [c] x) -(MULW (MOVWconst [c]) x) -> (MULWconst [c] x) -(MULB x (MOVBconst [c])) -> (MULBconst [c] x) -(MULB (MOVBconst [c]) x) -> (MULBconst [c] x) (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x) (ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x) (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x) (ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x) -(ANDW x (MOVLconst [c])) -> (ANDWconst [c] x) -(ANDW (MOVLconst [c]) x) -> (ANDWconst [c] x) -(ANDW x (MOVWconst [c])) -> (ANDWconst [c] x) -(ANDW (MOVWconst [c]) x) -> (ANDWconst [c] x) -(ANDB x (MOVLconst [c])) -> (ANDBconst [c] x) -(ANDB (MOVLconst [c]) x) -> (ANDBconst [c] x) -(ANDB x (MOVBconst [c])) -> (ANDBconst [c] x) -(ANDB (MOVBconst [c]) x) -> (ANDBconst [c] x) -(ANDBconst [c] (ANDBconst [d] x)) -> (ANDBconst [c & d] x) -(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x) (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x) (ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x) @@ -484,108 +462,64 @@ (ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x) (ORL x (MOVLconst [c])) -> (ORLconst [c] x) (ORL (MOVLconst [c]) x) -> (ORLconst [c] x) -(ORW x (MOVWconst [c])) -> (ORWconst [c] x) -(ORW (MOVWconst [c]) x) -> (ORWconst [c] x) -(ORB x (MOVBconst [c])) -> (ORBconst [c] x) -(ORB (MOVBconst [c]) x) -> (ORBconst [c] x) (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x) (XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x) (XORL x (MOVLconst [c])) -> (XORLconst [c] x) (XORL (MOVLconst [c]) x) -> (XORLconst [c] x) -(XORW x (MOVWconst [c])) -> (XORWconst [c] x) -(XORW (MOVWconst [c]) x) -> (XORWconst [c] x) -(XORB x (MOVBconst [c])) -> (XORBconst [c] x) -(XORB (MOVBconst [c]) x) -> (XORBconst [c] x) (SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x) (SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x) -(SHLQ x (MOVWconst [c])) -> (SHLQconst [c&63] x) -(SHLQ x (MOVBconst [c])) -> (SHLQconst [c&63] x) (SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x) (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x) -(SHLL x (MOVWconst [c])) -> (SHLLconst [c&31] x) -(SHLL x (MOVBconst [c])) -> (SHLLconst [c&31] x) - -(SHLW x (MOVQconst [c])) -> (SHLWconst [c&31] x) -(SHLW x (MOVLconst [c])) -> (SHLWconst [c&31] x) -(SHLW x (MOVWconst [c])) -> (SHLWconst [c&31] x) -(SHLW x (MOVBconst [c])) -> (SHLWconst [c&31] x) - -(SHLB x (MOVQconst [c])) -> (SHLBconst [c&31] x) -(SHLB x (MOVLconst [c])) -> (SHLBconst [c&31] x) -(SHLB x (MOVWconst [c])) -> (SHLBconst [c&31] x) -(SHLB x (MOVBconst [c])) -> (SHLBconst [c&31] x) (SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x) (SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x) -(SHRQ x (MOVWconst [c])) -> (SHRQconst [c&63] x) -(SHRQ x (MOVBconst [c])) -> (SHRQconst [c&63] x) (SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x) (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x) -(SHRL x (MOVWconst [c])) -> (SHRLconst [c&31] x) -(SHRL x (MOVBconst [c])) -> (SHRLconst [c&31] x) (SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x) (SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x) -(SHRW x (MOVWconst [c])) -> (SHRWconst [c&31] x) -(SHRW x (MOVBconst [c])) -> (SHRWconst [c&31] x) (SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x) (SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x) -(SHRB x (MOVWconst [c])) -> (SHRBconst [c&31] x) -(SHRB x (MOVBconst [c])) -> (SHRBconst [c&31] x) (SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x) (SARQ x (MOVLconst [c])) 
-> (SARQconst [c&63] x) -(SARQ x (MOVWconst [c])) -> (SARQconst [c&63] x) -(SARQ x (MOVBconst [c])) -> (SARQconst [c&63] x) (SARL x (MOVQconst [c])) -> (SARLconst [c&31] x) (SARL x (MOVLconst [c])) -> (SARLconst [c&31] x) -(SARL x (MOVWconst [c])) -> (SARLconst [c&31] x) -(SARL x (MOVBconst [c])) -> (SARLconst [c&31] x) (SARW x (MOVQconst [c])) -> (SARWconst [c&31] x) (SARW x (MOVLconst [c])) -> (SARWconst [c&31] x) -(SARW x (MOVWconst [c])) -> (SARWconst [c&31] x) -(SARW x (MOVBconst [c])) -> (SARWconst [c&31] x) (SARB x (MOVQconst [c])) -> (SARBconst [c&31] x) (SARB x (MOVLconst [c])) -> (SARBconst [c&31] x) -(SARB x (MOVWconst [c])) -> (SARBconst [c&31] x) -(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x) -(SARB x (ANDBconst [31] y)) -> (SARB x y) -(SARW x (ANDWconst [31] y)) -> (SARW x y) (SARL x (ANDLconst [31] y)) -> (SARL x y) (SARQ x (ANDQconst [63] y)) -> (SARQ x y) -(SHLB x (ANDBconst [31] y)) -> (SHLB x y) -(SHLW x (ANDWconst [31] y)) -> (SHLW x y) (SHLL x (ANDLconst [31] y)) -> (SHLL x y) (SHLQ x (ANDQconst [63] y)) -> (SHLQ x y) -(SHRB x (ANDBconst [31] y)) -> (SHRB x y) -(SHRW x (ANDWconst [31] y)) -> (SHRW x y) (SHRL x (ANDLconst [31] y)) -> (SHRL x y) (SHRQ x (ANDQconst [63] y)) -> (SHRQ x y) // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) // because the x86 instructions are defined to use all 5 bits of the shift even // for the small shifts. I don't think we'll ever generate a weird shift (e.g. -// (SHLW x (MOVWconst [24])), but just in case. +// (SHRW x (MOVLconst [24])), but just in case. (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c]) (CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c])) (CMPL x (MOVLconst [c])) -> (CMPLconst x [c]) (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c])) -(CMPW x (MOVWconst [c])) -> (CMPWconst x [c]) -(CMPW (MOVWconst [c]) x) -> (InvertFlags (CMPWconst x [c])) -(CMPB x (MOVBconst [c])) -> (CMPBconst x [c]) -(CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst x [c])) +(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))]) +(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))])) +(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))]) +(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))])) // Using MOVBQZX instead of ANDQ is cheaper. (ANDQconst [0xFF] x) -> (MOVBQZX x) @@ -709,12 +643,12 @@ (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x // Fold extensions and ANDs together. 
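The folds that follow rest on two identities: zero-extension is just an AND with a low mask, and when the mask already clears the sign bit, sign-extension coincides with zero-extension. On amd64 a 32-bit op also zeroes bits 32-63 of its destination, which is why the rewritten rules can stay ANDLconst rather than widening to ANDQconst. A quick property check of both identities (my sketch, not the compiler's code):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	for i := 0; i < 1000000; i++ {
		x, c := rand.Uint32(), rand.Uint32()

		// (MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x):
		// zero-extending the low byte of x&c equals one 32-bit AND with
		// c&0xff, since a 32-bit result is already zero in bits 32-63.
		if uint64(uint8(x&c)) != uint64(x&(c&0xff)) {
			fmt.Println("MOVBQZX fold broken for", x, c)
		}

		// (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x):
		// if the mask clears bit 7, the byte is non-negative, so sign-
		// and zero-extension coincide.
		if c&0x80 == 0 && uint64(int64(int8(x&c))) != uint64(x&(c&0x7f)) {
			fmt.Println("MOVBQSX fold broken for", x, c)
		}
	}
	fmt.Println("extension/AND folds verified on random inputs")
}
```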
-(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
-(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
-(MOVLQZX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
-(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x)
-(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x)
-(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x)
+(MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x)
+(MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x)
+(MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x)
+(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x)
+(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x)
+(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x)

 // Don't extend before storing
 (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem)
@@ -750,9 +684,9 @@
   (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
 (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) && validOff(off) ->
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) && validOff(off) ->
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) ->
   (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)

 // Fold address offsets into constant stores.
@@ -1086,16 +1020,16 @@
 (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)>uint32(y) -> (FlagLT_UGT)
 (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)<uint32(y) -> (FlagGT_ULT)
 (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
-(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
-(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)<uint16(y) -> (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<int16(y) && uint16(x)>uint16(y) -> (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)<uint16(y) -> (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)<uint8(y) -> (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<int8(y) && uint8(x)>uint8(y) -> (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)

 // Other known comparisons.
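The five flag constants produced above record the signed and the unsigned ordering independently, since a later SETxx or conditional branch may consume either one; the known-comparison rules that follow reach the same constants from value-range facts alone (a zero-extended byte is at most 0xFF, a right shift by c leaves at most 64-c significant bits, an AND with m is bounded by m). A paraphrase of the 16-bit constant folding in plain Go (not the generated rewrite code):

```go
package main

import "fmt"

// flagConstant mirrors the (CMPWconst (MOVLconst [x]) [y]) folding above.
// The five cases are exhaustive: equal signed implies equal unsigned,
// and two values with the same bits cannot differ in only one ordering.
func flagConstant(x, y int16) string {
	ux, uy := uint16(x), uint16(y)
	switch {
	case x == y:
		return "FlagEQ"
	case x < y && ux < uy:
		return "FlagLT_ULT"
	case x < y && ux > uy:
		return "FlagLT_UGT"
	case x > y && ux < uy:
		return "FlagGT_ULT"
	default: // x > y && ux > uy
		return "FlagGT_UGT"
	}
}

func main() {
	fmt.Println(flagConstant(-1, 1)) // FlagLT_UGT: less signed, greater unsigned
	fmt.Println(flagConstant(2, 1))  // FlagGT_UGT
}
```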
 (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
@@ -1105,8 +1039,8 @@
 (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= n -> (FlagLT_ULT)
 (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT)
 (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT)
-(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
-(CMPBconst (ANDBconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT)
 // TODO: DIVxU also.

 // Absorb flag constants into SBB ops.
@@ -1183,181 +1117,140 @@
 (UGE (FlagGT_UGT) yes no) -> (First nil yes no)

 // Absorb flag constants into SETxx ops.
-(SETEQ (FlagEQ)) -> (MOVBconst [1])
-(SETEQ (FlagLT_ULT)) -> (MOVBconst [0])
-(SETEQ (FlagLT_UGT)) -> (MOVBconst [0])
-(SETEQ (FlagGT_ULT)) -> (MOVBconst [0])
-(SETEQ (FlagGT_UGT)) -> (MOVBconst [0])
+(SETEQ (FlagEQ)) -> (MOVLconst [1])
+(SETEQ (FlagLT_ULT)) -> (MOVLconst [0])
+(SETEQ (FlagLT_UGT)) -> (MOVLconst [0])
+(SETEQ (FlagGT_ULT)) -> (MOVLconst [0])
+(SETEQ (FlagGT_UGT)) -> (MOVLconst [0])

-(SETNE (FlagEQ)) -> (MOVBconst [0])
-(SETNE (FlagLT_ULT)) -> (MOVBconst [1])
-(SETNE (FlagLT_UGT)) -> (MOVBconst [1])
-(SETNE (FlagGT_ULT)) -> (MOVBconst [1])
-(SETNE (FlagGT_UGT)) -> (MOVBconst [1])
+(SETNE (FlagEQ)) -> (MOVLconst [0])
+(SETNE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETNE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETNE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETNE (FlagGT_UGT)) -> (MOVLconst [1])

-(SETL (FlagEQ)) -> (MOVBconst [0])
-(SETL (FlagLT_ULT)) -> (MOVBconst [1])
-(SETL (FlagLT_UGT)) -> (MOVBconst [1])
-(SETL (FlagGT_ULT)) -> (MOVBconst [0])
-(SETL (FlagGT_UGT)) -> (MOVBconst [0])
+(SETL (FlagEQ)) -> (MOVLconst [0])
+(SETL (FlagLT_ULT)) -> (MOVLconst [1])
+(SETL (FlagLT_UGT)) -> (MOVLconst [1])
+(SETL (FlagGT_ULT)) -> (MOVLconst [0])
+(SETL (FlagGT_UGT)) -> (MOVLconst [0])

-(SETLE (FlagEQ)) -> (MOVBconst [1])
-(SETLE (FlagLT_ULT)) -> (MOVBconst [1])
-(SETLE (FlagLT_UGT)) -> (MOVBconst [1])
-(SETLE (FlagGT_ULT)) -> (MOVBconst [0])
-(SETLE (FlagGT_UGT)) -> (MOVBconst [0])
+(SETLE (FlagEQ)) -> (MOVLconst [1])
+(SETLE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETLE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETLE (FlagGT_ULT)) -> (MOVLconst [0])
+(SETLE (FlagGT_UGT)) -> (MOVLconst [0])

-(SETG (FlagEQ)) -> (MOVBconst [0])
-(SETG (FlagLT_ULT)) -> (MOVBconst [0])
-(SETG (FlagLT_UGT)) -> (MOVBconst [0])
-(SETG (FlagGT_ULT)) -> (MOVBconst [1])
-(SETG (FlagGT_UGT)) -> (MOVBconst [1])
+(SETG (FlagEQ)) -> (MOVLconst [0])
+(SETG (FlagLT_ULT)) -> (MOVLconst [0])
+(SETG (FlagLT_UGT)) -> (MOVLconst [0])
+(SETG (FlagGT_ULT)) -> (MOVLconst [1])
+(SETG (FlagGT_UGT)) -> (MOVLconst [1])

-(SETGE (FlagEQ)) -> (MOVBconst [1])
-(SETGE (FlagLT_ULT)) -> (MOVBconst [0])
-(SETGE (FlagLT_UGT)) -> (MOVBconst [0])
-(SETGE (FlagGT_ULT)) -> (MOVBconst [1])
-(SETGE (FlagGT_UGT)) -> (MOVBconst [1])
+(SETGE (FlagEQ)) -> (MOVLconst [1])
+(SETGE (FlagLT_ULT)) -> (MOVLconst [0])
+(SETGE (FlagLT_UGT)) -> (MOVLconst [0])
+(SETGE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETGE (FlagGT_UGT)) -> (MOVLconst [1])

-(SETB (FlagEQ)) -> (MOVBconst [0])
-(SETB (FlagLT_ULT)) -> (MOVBconst [1])
-(SETB (FlagLT_UGT)) -> (MOVBconst [0])
-(SETB (FlagGT_ULT)) -> (MOVBconst [1])
-(SETB (FlagGT_UGT)) -> (MOVBconst [0])
+(SETB (FlagEQ)) -> (MOVLconst [0])
+(SETB (FlagLT_ULT)) -> (MOVLconst [1])
+(SETB (FlagLT_UGT)) -> (MOVLconst [0])
+(SETB (FlagGT_ULT)) -> (MOVLconst [1])
+(SETB (FlagGT_UGT)) -> (MOVLconst [0])

-(SETBE (FlagEQ)) -> (MOVBconst [1])
-(SETBE (FlagLT_ULT)) -> (MOVBconst [1])
-(SETBE (FlagLT_UGT)) -> (MOVBconst [0])
-(SETBE (FlagGT_ULT)) -> (MOVBconst [1])
-(SETBE (FlagGT_UGT)) -> (MOVBconst [0])
+(SETBE (FlagEQ)) -> (MOVLconst [1])
+(SETBE (FlagLT_ULT)) -> (MOVLconst [1])
+(SETBE (FlagLT_UGT)) -> (MOVLconst [0])
+(SETBE (FlagGT_ULT)) -> (MOVLconst [1])
+(SETBE (FlagGT_UGT)) -> (MOVLconst [0])

-(SETA (FlagEQ)) -> (MOVBconst [0])
-(SETA (FlagLT_ULT)) -> (MOVBconst [0])
-(SETA (FlagLT_UGT)) -> (MOVBconst [1])
-(SETA (FlagGT_ULT)) -> (MOVBconst [0])
-(SETA (FlagGT_UGT)) -> (MOVBconst [1])
+(SETA (FlagEQ)) -> (MOVLconst [0])
+(SETA (FlagLT_ULT)) -> (MOVLconst [0])
+(SETA (FlagLT_UGT)) -> (MOVLconst [1])
+(SETA (FlagGT_ULT)) -> (MOVLconst [0])
+(SETA (FlagGT_UGT)) -> (MOVLconst [1])

-(SETAE (FlagEQ)) -> (MOVBconst [1])
-(SETAE (FlagLT_ULT)) -> (MOVBconst [0])
-(SETAE (FlagLT_UGT)) -> (MOVBconst [1])
-(SETAE (FlagGT_ULT)) -> (MOVBconst [0])
-(SETAE (FlagGT_UGT)) -> (MOVBconst [1])
+(SETAE (FlagEQ)) -> (MOVLconst [1])
+(SETAE (FlagLT_ULT)) -> (MOVLconst [0])
+(SETAE (FlagLT_UGT)) -> (MOVLconst [1])
+(SETAE (FlagGT_ULT)) -> (MOVLconst [0])
+(SETAE (FlagGT_UGT)) -> (MOVLconst [1])

 // Remove redundant *const ops
 (ADDQconst [0] x) -> x
 (ADDLconst [c] x) && int32(c)==0 -> x
-(ADDWconst [c] x) && int16(c)==0 -> x
-(ADDBconst [c] x) && int8(c)==0 -> x
 (SUBQconst [0] x) -> x
 (SUBLconst [c] x) && int32(c) == 0 -> x
-(SUBWconst [c] x) && int16(c) == 0 -> x
-(SUBBconst [c] x) && int8(c) == 0 -> x
 (ANDQconst [0] _) -> (MOVQconst [0])
 (ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0])
-(ANDWconst [c] _) && int16(c)==0 -> (MOVWconst [0])
-(ANDBconst [c] _) && int8(c)==0 -> (MOVBconst [0])
 (ANDQconst [-1] x) -> x
 (ANDLconst [c] x) && int32(c)==-1 -> x
-(ANDWconst [c] x) && int16(c)==-1 -> x
-(ANDBconst [c] x) && int8(c)==-1 -> x
 (ORQconst [0] x) -> x
 (ORLconst [c] x) && int32(c)==0 -> x
-(ORWconst [c] x) && int16(c)==0 -> x
-(ORBconst [c] x) && int8(c)==0 -> x
 (ORQconst [-1] _) -> (MOVQconst [-1])
 (ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1])
-(ORWconst [c] _) && int16(c)==-1 -> (MOVWconst [-1])
-(ORBconst [c] _) && int8(c)==-1 -> (MOVBconst [-1])
 (XORQconst [0] x) -> x
 (XORLconst [c] x) && int32(c)==0 -> x
-(XORWconst [c] x) && int16(c)==0 -> x
-(XORBconst [c] x) && int8(c)==0 -> x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])

 // Convert constant subtracts to constant adds
 (SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
 (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
-(SUBWconst [c] x) -> (ADDWconst [int64(int16(-c))] x)
-(SUBBconst [c] x) -> (ADDBconst [int64(int8(-c))] x)

 // generic constant folding
 // TODO: more of this
 (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
 (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
-(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c+d))])
-(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c+d))])
 (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
 (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
-(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int16(c+d))] x)
-(ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [int64(int8(c+d))] x)
 (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
 (SUBLconst (MOVLconst [d]) [c]) -> (MOVLconst [int64(int32(d-c))])
-(SUBWconst (MOVWconst [d]) [c]) -> (MOVWconst [int64(int16(d-c))])
-(SUBBconst (MOVBconst [d]) [c]) -> (MOVBconst [int64(int8(d-c))])
 (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
 (SUBLconst (SUBLconst x [d]) [c]) -> (ADDLconst [int64(int32(-c-d))] x)
-(SUBWconst (SUBWconst x [d]) [c]) -> (ADDWconst [int64(int16(-c-d))] x)
-(SUBBconst (SUBBconst x [d]) [c]) -> (ADDBconst [int64(int8(-c-d))] x)
 (SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
 (SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
 (SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
 (SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
 (NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
 (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
-(NEGW (MOVWconst [c])) -> (MOVWconst [int64(int16(-c))])
-(NEGB (MOVBconst [c])) -> (MOVBconst [int64(int8(-c))])
 (MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
 (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
-(MULWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c*d))])
-(MULBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c*d))])
 (ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
 (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
-(ANDWconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
-(ANDBconst [c] (MOVBconst [d])) -> (MOVBconst [c&d])
 (ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d])
 (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d])
-(ORWconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
-(ORBconst [c] (MOVBconst [d])) -> (MOVBconst [c|d])
 (XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d])
 (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
-(XORWconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
-(XORBconst [c] (MOVBconst [d])) -> (MOVBconst [c^d])
 (NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
 (NOTL (MOVLconst [c])) -> (MOVLconst [^c])
-(NOTW (MOVWconst [c])) -> (MOVWconst [^c])
-(NOTB (MOVBconst [c])) -> (MOVBconst [^c])

 // generic simplifications
 // TODO: more of this
 (ADDQ x (NEGQ y)) -> (SUBQ x y)
 (ADDL x (NEGL y)) -> (SUBL x y)
-(ADDW x (NEGW y)) -> (SUBW x y)
-(ADDB x (NEGB y)) -> (SUBB x y)
 (SUBQ x x) -> (MOVQconst [0])
 (SUBL x x) -> (MOVLconst [0])
-(SUBW x x) -> (MOVWconst [0])
-(SUBB x x) -> (MOVBconst [0])
 (ANDQ x x) -> x
 (ANDL x x) -> x
-(ANDW x x) -> x
-(ANDB x x) -> x
 (ORQ x x) -> x
 (ORL x x) -> x
-(ORW x x) -> x
-(ORB x x) -> x
 (XORQ x x) -> (MOVQconst [0])
 (XORL x x) -> (MOVLconst [0])
-(XORW x x) -> (MOVWconst
[0]) -(XORB x x) -> (MOVBconst [0]) // checking AND against 0. (CMPQconst (ANDQ x y) [0]) -> (TESTQ x y) (CMPLconst (ANDL x y) [0]) -> (TESTL x y) -(CMPWconst (ANDW x y) [0]) -> (TESTW x y) -(CMPBconst (ANDB x y) [0]) -> (TESTB x y) +(CMPWconst (ANDL x y) [0]) -> (TESTW x y) +(CMPBconst (ANDL x y) [0]) -> (TESTB x y) (CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x) (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x) -(CMPWconst (ANDWconst [c] x) [0]) -> (TESTWconst [c] x) -(CMPBconst (ANDBconst [c] x) [0]) -> (TESTBconst [c] x) +(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x) +(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x) // TEST %reg,%reg is shorter than CMP (CMPQconst x [0]) -> (TESTQ x x) @@ -1368,8 +1261,8 @@ // Combining byte loads into larger (unaligned) loads. // There are many ways these combinations could occur. This is // designed to match the way encoding/binary.LittleEndian does it. -(ORW x0:(MOVBload [i] {s} p mem) - s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) +(ORL x0:(MOVBload [i] {s} p mem) + s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 @@ -1459,8 +1352,8 @@ && clobber(o5) -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) -(ORW x0:(MOVBloadidx1 [i] {s} p idx mem) - s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) +(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 88bb6bc542..35eeb61941 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -190,30 +190,18 @@ func init() { // binary ops {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true}, // arg0 + arg1 {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 - {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 - {name: "ADDB", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64"}, // arg0 + auxint {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32"}, // arg0 + auxint - {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int16"}, // arg0 + auxint - {name: "ADDBconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int8"}, // arg0 + auxint {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true}, // arg0 - arg1 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 - {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 - {name: "SUBB", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true}, // arg0 - auxint {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0 - auxint - {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int16", resultInArg0: true}, // arg0 - auxint - {name: "SUBBconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int8", resultInArg0: true}, // arg0 - auxint {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true}, // arg0 * arg1 {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", 
commutative: true, resultInArg0: true}, // arg0 * arg1 - {name: "MULW", argLength: 2, reg: gp21, asm: "IMULW", commutative: true, resultInArg0: true}, // arg0 * arg1 - {name: "MULB", argLength: 2, reg: gp21, asm: "IMULW", commutative: true, resultInArg0: true}, // arg0 * arg1 {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true}, // arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true}, // arg0 * auxint - {name: "MULWconst", argLength: 1, reg: gp11, asm: "IMULW", aux: "Int16", resultInArg0: true}, // arg0 * auxint - {name: "MULBconst", argLength: 1, reg: gp11, asm: "IMULW", aux: "Int8", resultInArg0: true}, // arg0 * auxint {name: "HMULQ", argLength: 2, reg: gp11hmul, asm: "IMULQ"}, // (arg0 * arg1) >> width {name: "HMULL", argLength: 2, reg: gp11hmul, asm: "IMULL"}, // (arg0 * arg1) >> width @@ -242,30 +230,18 @@ func init() { {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true}, // arg0 & arg1 {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1 - {name: "ANDW", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1 - {name: "ANDB", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1 {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true}, // arg0 & auxint {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true}, // arg0 & auxint - {name: "ANDWconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int16", resultInArg0: true}, // arg0 & auxint - {name: "ANDBconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int8", resultInArg0: true}, // arg0 & auxint {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true}, // arg0 | arg1 {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1 - {name: "ORW", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1 - {name: "ORB", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1 {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true}, // arg0 | auxint {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true}, // arg0 | auxint - {name: "ORWconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int16", resultInArg0: true}, // arg0 | auxint - {name: "ORBconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int8", resultInArg0: true}, // arg0 | auxint {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true}, // arg0 ^ arg1 {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1 - {name: "XORW", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1 - {name: "XORB", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1 {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true}, // arg0 ^ auxint {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true}, // arg0 ^ auxint - {name: "XORWconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int16", resultInArg0: true}, // arg0 ^ auxint - {name: "XORBconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int8", resultInArg0: true}, // arg0 ^ auxint 
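Worth noting when reading these deletions: almost every removed W/B op already assembled to the 32-bit instruction (ADDW and ADDB both carried asm: "ADDL", and so on); the exceptions were MULW/MULB, which used IMULW, and the MOVBconst/MOVWconst materializations, which used MOVB/MOVW. So for most ops the CL deletes redundant op names rather than changing the emitted code. For decoding the regMask values that appear throughout the generated table further down, a small helper; the bit-to-register numbering is inferred from the mask comments in this diff rather than stated anywhere in it:

```go
package main

import "fmt"

func main() {
	// Bit i of a regMask selects register i; this ordering reproduces
	// the "// AX CX DX BX ..." comments attached to the masks below.
	regs := []string{"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI",
		"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"}

	decode := func(m uint64) []string {
		var out []string
		for i, r := range regs {
			if m&(1<<uint(i)) != 0 {
				out = append(out, r)
			}
		}
		return out
	}

	fmt.Println(decode(65535))     // input masks: all 16 GP registers, SP included
	fmt.Println(decode(65519))     // output masks: everything but SP (bit 4)
	fmt.Println(65535 &^ (1 << 4)) // 65519
	fmt.Println(uint64(1) << 33)   // 8589934592, the FLAGS clobber bit
}
```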
{name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1 @@ -290,12 +266,8 @@ func init() { {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true}, // arg0 << arg1, shift amount is mod 64 {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32 - {name: "SHLW", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32 - {name: "SHLB", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32 {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true}, // arg0 << auxint, shift amount 0-63 {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true}, // arg0 << auxint, shift amount 0-31 - {name: "SHLWconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int16", resultInArg0: true}, // arg0 << auxint, shift amount 0-31 - {name: "SHLBconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true}, // arg0 << auxint, shift amount 0-31 // Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount! {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true}, // unsigned arg0 >> arg1, shift amount is mod 64 @@ -324,13 +296,9 @@ func init() { // unary ops {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true}, // -arg0 {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0 - {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0 - {name: "NEGB", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0 {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true}, // ^arg0 {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 - {name: "NOTW", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 - {name: "NOTB", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 {name: "BSFQ", argLength: 1, reg: gp11, asm: "BSFQ"}, // arg0 # of low-order zeroes ; undef if zero {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL"}, // arg0 # of low-order zeroes ; undef if zero @@ -385,8 +353,6 @@ func init() { {name: "MOVLQSX", argLength: 1, reg: gp11nf, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64 {name: "MOVLQZX", argLength: 1, reg: gp11nf, asm: "MOVLQZX"}, // zero extend arg0 from int32 to int64 - {name: "MOVBconst", reg: gp01, asm: "MOVB", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint - {name: "MOVWconst", reg: gp01, asm: "MOVW", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 381422adfd..70af757194 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -120,28 +120,16 @@ const ( OpAMD64MOVSDstoreidx8 OpAMD64ADDQ OpAMD64ADDL - OpAMD64ADDW - OpAMD64ADDB OpAMD64ADDQconst OpAMD64ADDLconst - OpAMD64ADDWconst - OpAMD64ADDBconst OpAMD64SUBQ OpAMD64SUBL - OpAMD64SUBW - OpAMD64SUBB OpAMD64SUBQconst 
OpAMD64SUBLconst - OpAMD64SUBWconst - OpAMD64SUBBconst OpAMD64MULQ OpAMD64MULL - OpAMD64MULW - OpAMD64MULB OpAMD64MULQconst OpAMD64MULLconst - OpAMD64MULWconst - OpAMD64MULBconst OpAMD64HMULQ OpAMD64HMULL OpAMD64HMULW @@ -165,28 +153,16 @@ const ( OpAMD64MODWU OpAMD64ANDQ OpAMD64ANDL - OpAMD64ANDW - OpAMD64ANDB OpAMD64ANDQconst OpAMD64ANDLconst - OpAMD64ANDWconst - OpAMD64ANDBconst OpAMD64ORQ OpAMD64ORL - OpAMD64ORW - OpAMD64ORB OpAMD64ORQconst OpAMD64ORLconst - OpAMD64ORWconst - OpAMD64ORBconst OpAMD64XORQ OpAMD64XORL - OpAMD64XORW - OpAMD64XORB OpAMD64XORQconst OpAMD64XORLconst - OpAMD64XORWconst - OpAMD64XORBconst OpAMD64CMPQ OpAMD64CMPL OpAMD64CMPW @@ -207,12 +183,8 @@ const ( OpAMD64TESTBconst OpAMD64SHLQ OpAMD64SHLL - OpAMD64SHLW - OpAMD64SHLB OpAMD64SHLQconst OpAMD64SHLLconst - OpAMD64SHLWconst - OpAMD64SHLBconst OpAMD64SHRQ OpAMD64SHRL OpAMD64SHRW @@ -235,12 +207,8 @@ const ( OpAMD64ROLBconst OpAMD64NEGQ OpAMD64NEGL - OpAMD64NEGW - OpAMD64NEGB OpAMD64NOTQ OpAMD64NOTL - OpAMD64NOTW - OpAMD64NOTB OpAMD64BSFQ OpAMD64BSFL OpAMD64BSFW @@ -280,8 +248,6 @@ const ( OpAMD64MOVWQZX OpAMD64MOVLQSX OpAMD64MOVLQZX - OpAMD64MOVBconst - OpAMD64MOVWconst OpAMD64MOVLconst OpAMD64MOVQconst OpAMD64CVTTSD2SL @@ -1002,38 +968,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADDW", - argLen: 2, - commutative: true, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ADDB", - argLen: 2, - commutative: true, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ADDQconst", auxType: auxInt64, @@ -1064,36 +998,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADDWconst", - auxType: auxInt16, - argLen: 1, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ADDBconst", - auxType: auxInt8, - argLen: 1, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SUBQ", argLen: 2, @@ -1126,38 +1030,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SUBW", - argLen: 2, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SUBB", - argLen: 2, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 
8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SUBQconst", auxType: auxInt64, @@ -1190,38 +1062,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SUBWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SUBBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "MULQ", argLen: 2, @@ -1256,40 +1096,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MULW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "MULB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "MULQconst", auxType: auxInt64, @@ -1322,38 +1128,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MULWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "MULBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "HMULQ", argLen: 2, @@ -1704,40 +1478,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ANDW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ANDB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ANDQconst", auxType: auxInt64, 
@@ -1770,38 +1510,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ANDWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ANDBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ORQ", argLen: 2, @@ -1836,40 +1544,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ORW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ORB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ORQconst", auxType: auxInt64, @@ -1902,38 +1576,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ORWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ORBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "XORQ", argLen: 2, @@ -1968,40 +1610,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "XORW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "XORB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "XORQconst", auxType: auxInt64, @@ -2034,38 +1642,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "XORWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AXORL, - reg: 
regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "XORBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "CMPQ", argLen: 2, @@ -2350,38 +1926,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SHLW", - argLen: 2, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SHLB", - argLen: 2, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SHLQconst", auxType: auxInt64, @@ -2414,38 +1958,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SHLWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SHLBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SHRQ", argLen: 2, @@ -2796,36 +2308,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "NEGW", - argLen: 1, - resultInArg0: true, - asm: x86.ANEGL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "NEGB", - argLen: 1, - resultInArg0: true, - asm: x86.ANEGL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "NOTQ", argLen: 1, @@ -2856,36 +2338,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "NOTW", - argLen: 1, - resultInArg0: true, - asm: x86.ANOTL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "NOTB", - argLen: 1, - resultInArg0: true, - asm: x86.ANOTL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R14 R15 - }, - }, - }, { name: "BSFQ", argLen: 1, @@ -3429,30 +2881,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MOVBconst", - auxType: auxInt8, - argLen: 0, - rematerializeable: true, - asm: x86.AMOVB, - reg: regInfo{ - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt16, - argLen: 0, - rematerializeable: true, - asm: x86.AMOVW, - reg: regInfo{ - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "MOVLconst", auxType: auxInt32, diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index 6f3f690f1e..cf8f452d12 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -11,8 +11,8 @@ func TestLiveControlOps(t *testing.T) { f := Fun(c, "entry", Bloc("entry", Valu("mem", OpInitMem, TypeMem, 0, nil), - Valu("x", OpAMD64MOVBconst, TypeInt8, 1, nil), - Valu("y", OpAMD64MOVBconst, TypeInt8, 2, nil), + Valu("x", OpAMD64MOVLconst, TypeInt8, 1, nil), + Valu("y", OpAMD64MOVLconst, TypeInt8, 2, nil), Valu("a", OpAMD64TESTB, TypeFlags, 0, nil, "x", "y"), Valu("b", OpAMD64TESTB, TypeFlags, 0, nil, "y", "x"), Eq("a", "if", "exit"), diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 8507959f96..e2c4240ae3 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -8,10 +8,6 @@ import "math" var _ = math.MinInt8 // in case not otherwise used func rewriteValueAMD64(v *Value, config *Config) bool { switch v.Op { - case OpAMD64ADDB: - return rewriteValueAMD64_OpAMD64ADDB(v, config) - case OpAMD64ADDBconst: - return rewriteValueAMD64_OpAMD64ADDBconst(v, config) case OpAMD64ADDL: return rewriteValueAMD64_OpAMD64ADDL(v, config) case OpAMD64ADDLconst: @@ -20,14 +16,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64ADDQ(v, config) case OpAMD64ADDQconst: return rewriteValueAMD64_OpAMD64ADDQconst(v, config) - case OpAMD64ADDW: - return rewriteValueAMD64_OpAMD64ADDW(v, config) - case OpAMD64ADDWconst: - return rewriteValueAMD64_OpAMD64ADDWconst(v, config) - case OpAMD64ANDB: - return rewriteValueAMD64_OpAMD64ANDB(v, config) - case OpAMD64ANDBconst: - return rewriteValueAMD64_OpAMD64ANDBconst(v, config) case OpAMD64ANDL: return rewriteValueAMD64_OpAMD64ANDL(v, config) case OpAMD64ANDLconst: @@ -36,10 +24,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64ANDQ(v, config) case OpAMD64ANDQconst: return rewriteValueAMD64_OpAMD64ANDQconst(v, config) - case OpAMD64ANDW: - return rewriteValueAMD64_OpAMD64ANDW(v, config) - case OpAMD64ANDWconst: - return rewriteValueAMD64_OpAMD64ANDWconst(v, config) case OpAdd16: return rewriteValueAMD64_OpAdd16(v, config) case OpAdd32: @@ -458,10 +442,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) case OpAMD64MOVWstoreidx2: return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) - case OpAMD64MULB: - return rewriteValueAMD64_OpAMD64MULB(v, config) - case OpAMD64MULBconst: - return rewriteValueAMD64_OpAMD64MULBconst(v, config) case OpAMD64MULL: return rewriteValueAMD64_OpAMD64MULL(v, config) case OpAMD64MULLconst: @@ -470,10 +450,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64MULQ(v, config) case OpAMD64MULQconst: 
return rewriteValueAMD64_OpAMD64MULQconst(v, config) - case OpAMD64MULW: - return rewriteValueAMD64_OpAMD64MULW(v, config) - case OpAMD64MULWconst: - return rewriteValueAMD64_OpAMD64MULWconst(v, config) case OpMod16: return rewriteValueAMD64_OpMod16(v, config) case OpMod16u: @@ -504,22 +480,14 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpMul64F(v, config) case OpMul8: return rewriteValueAMD64_OpMul8(v, config) - case OpAMD64NEGB: - return rewriteValueAMD64_OpAMD64NEGB(v, config) case OpAMD64NEGL: return rewriteValueAMD64_OpAMD64NEGL(v, config) case OpAMD64NEGQ: return rewriteValueAMD64_OpAMD64NEGQ(v, config) - case OpAMD64NEGW: - return rewriteValueAMD64_OpAMD64NEGW(v, config) - case OpAMD64NOTB: - return rewriteValueAMD64_OpAMD64NOTB(v, config) case OpAMD64NOTL: return rewriteValueAMD64_OpAMD64NOTL(v, config) case OpAMD64NOTQ: return rewriteValueAMD64_OpAMD64NOTQ(v, config) - case OpAMD64NOTW: - return rewriteValueAMD64_OpAMD64NOTW(v, config) case OpNeg16: return rewriteValueAMD64_OpNeg16(v, config) case OpNeg32: @@ -550,10 +518,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpNilCheck(v, config) case OpNot: return rewriteValueAMD64_OpNot(v, config) - case OpAMD64ORB: - return rewriteValueAMD64_OpAMD64ORB(v, config) - case OpAMD64ORBconst: - return rewriteValueAMD64_OpAMD64ORBconst(v, config) case OpAMD64ORL: return rewriteValueAMD64_OpAMD64ORL(v, config) case OpAMD64ORLconst: @@ -562,10 +526,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64ORQ(v, config) case OpAMD64ORQconst: return rewriteValueAMD64_OpAMD64ORQconst(v, config) - case OpAMD64ORW: - return rewriteValueAMD64_OpAMD64ORW(v, config) - case OpAMD64ORWconst: - return rewriteValueAMD64_OpAMD64ORWconst(v, config) case OpOffPtr: return rewriteValueAMD64_OpOffPtr(v, config) case OpOr16: @@ -680,14 +640,10 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64SETLE(v, config) case OpAMD64SETNE: return rewriteValueAMD64_OpAMD64SETNE(v, config) - case OpAMD64SHLB: - return rewriteValueAMD64_OpAMD64SHLB(v, config) case OpAMD64SHLL: return rewriteValueAMD64_OpAMD64SHLL(v, config) case OpAMD64SHLQ: return rewriteValueAMD64_OpAMD64SHLQ(v, config) - case OpAMD64SHLW: - return rewriteValueAMD64_OpAMD64SHLW(v, config) case OpAMD64SHRB: return rewriteValueAMD64_OpAMD64SHRB(v, config) case OpAMD64SHRL: @@ -696,10 +652,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64SHRQ(v, config) case OpAMD64SHRW: return rewriteValueAMD64_OpAMD64SHRW(v, config) - case OpAMD64SUBB: - return rewriteValueAMD64_OpAMD64SUBB(v, config) - case OpAMD64SUBBconst: - return rewriteValueAMD64_OpAMD64SUBBconst(v, config) case OpAMD64SUBL: return rewriteValueAMD64_OpAMD64SUBL(v, config) case OpAMD64SUBLconst: @@ -708,10 +660,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64SUBQ(v, config) case OpAMD64SUBQconst: return rewriteValueAMD64_OpAMD64SUBQconst(v, config) - case OpAMD64SUBW: - return rewriteValueAMD64_OpAMD64SUBW(v, config) - case OpAMD64SUBWconst: - return rewriteValueAMD64_OpAMD64SUBWconst(v, config) case OpSignExt16to32: return rewriteValueAMD64_OpSignExt16to32(v, config) case OpSignExt16to64: @@ -756,10 +704,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpTrunc64to32(v, config) case OpTrunc64to8: return rewriteValueAMD64_OpTrunc64to8(v, config) - case OpAMD64XORB: - return 
rewriteValueAMD64_OpAMD64XORB(v, config) - case OpAMD64XORBconst: - return rewriteValueAMD64_OpAMD64XORBconst(v, config) case OpAMD64XORL: return rewriteValueAMD64_OpAMD64XORL(v, config) case OpAMD64XORLconst: @@ -768,10 +712,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64XORQ(v, config) case OpAMD64XORQconst: return rewriteValueAMD64_OpAMD64XORQconst(v, config) - case OpAMD64XORW: - return rewriteValueAMD64_OpAMD64XORW(v, config) - case OpAMD64XORWconst: - return rewriteValueAMD64_OpAMD64XORWconst(v, config) case OpXor16: return rewriteValueAMD64_OpXor16(v, config) case OpXor32: @@ -797,105 +737,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ADDB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDB x (MOVBconst [c])) - // cond: - // result: (ADDBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ADDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDB (MOVBconst [c]) x) - // cond: - // result: (ADDBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ADDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDB x (NEGB y)) - // cond: - // result: (SUBB x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NEGB { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SUBB) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDBconst [c] x) - // cond: int8(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ADDBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [int64(int8(c+d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(c + d)) - return true - } - // match: (ADDBconst [c] (ADDBconst [d] x)) - // cond: - // result: (ADDBconst [int64(int8(c+d))] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ADDBconst) - v.AuxInt = int64(int8(c + d)) - v.AddArg(x) - return true - } - return false -} func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { b := v.Block _ = b @@ -1418,244 +1259,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ADDW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDW x (MOVWconst [c])) - // cond: - // result: (ADDWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ADDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDW (MOVWconst [c]) x) - // cond: - // result: (ADDWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ADDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDW x (NEGW y)) - // cond: - // result: (SUBW x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NEGW { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SUBW) - v.AddArg(x) 
- v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDWconst [c] x) - // cond: int16(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ADDWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [int64(int16(c+d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = int64(int16(c + d)) - return true - } - // match: (ADDWconst [c] (ADDWconst [d] x)) - // cond: - // result: (ADDWconst [int64(int16(c+d))] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ADDWconst) - v.AuxInt = int64(int16(c + d)) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ANDB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDB x (MOVLconst [c])) - // cond: - // result: (ANDBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB (MOVLconst [c]) x) - // cond: - // result: (ANDBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB x (MOVBconst [c])) - // cond: - // result: (ANDBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB (MOVBconst [c]) x) - // cond: - // result: (ANDBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ANDBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDBconst [c] (ANDBconst [d] x)) - // cond: - // result: (ANDBconst [c & d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ANDBconst) - v.AuxInt = c & d - v.AddArg(x) - return true - } - // match: (ANDBconst [c] _) - // cond: int8(c)==0 - // result: (MOVBconst [0]) - for { - c := v.AuxInt - if !(int8(c) == 0) { - break - } - v.reset(OpAMD64MOVBconst) - v.AuxInt = 0 - return true - } - // match: (ANDBconst [c] x) - // cond: int8(c)==-1 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == -1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ANDBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [c&d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = c & d - return true - } - return false -} func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { b := v.Block _ = b @@ -1914,155 +1517,16 @@ func 
rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ANDW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDW x (MOVLconst [c])) - // cond: - // result: (ANDWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW (MOVLconst [c]) x) - // cond: - // result: (ANDWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW x (MOVWconst [c])) - // cond: - // result: (ANDWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW (MOVWconst [c]) x) - // cond: - // result: (ANDWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ANDWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDWconst [c] (ANDWconst [d] x)) - // cond: - // result: (ANDWconst [c & d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ANDWconst) - v.AuxInt = c & d - v.AddArg(x) - return true - } - // match: (ANDWconst [c] _) - // cond: int16(c)==0 - // result: (MOVWconst [0]) - for { - c := v.AuxInt - if !(int16(c) == 0) { - break - } - v.reset(OpAMD64MOVWconst) - v.AuxInt = 0 - return true - } - // match: (ANDWconst [c] x) - // cond: int16(c)==-1 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == -1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ANDWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [c&d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = c & d - return true - } - return false -} func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { b := v.Block _ = b // match: (Add16 x y) // cond: - // result: (ADDW x y) + // result: (ADDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ADDW) + v.reset(OpAMD64ADDL) v.AddArg(x) v.AddArg(y) return true @@ -2138,11 +1602,11 @@ func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool { _ = b // match: (Add8 x y) // cond: - // result: (ADDB x y) + // result: (ADDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ADDB) + v.reset(OpAMD64ADDL) v.AddArg(x) v.AddArg(y) return true @@ -2186,11 +1650,11 @@ func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { _ = b // match: (And16 x y) // cond: - // result: (ANDW x y) + // result: (ANDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v.AddArg(x) v.AddArg(y) return true @@ -2234,11 +1698,11 @@ func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { _ = b // match: (And8 x y) // cond: - // result: (ANDB x y) + // result: (ANDL x y) for { x := 
v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) + v.reset(OpAMD64ANDL) v.AddArg(x) v.AddArg(y) return true @@ -2565,27 +2029,27 @@ func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool { func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPB x (MOVBconst [c])) + // match: (CMPB x (MOVLconst [c])) // cond: - // result: (CMPBconst x [c]) + // result: (CMPBconst x [int64(int8(c))]) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt v.reset(OpAMD64CMPBconst) v.AddArg(x) - v.AuxInt = c + v.AuxInt = int64(int8(c)) return true } - // match: (CMPB (MOVBconst [c]) x) + // match: (CMPB (MOVLconst [c]) x) // cond: - // result: (InvertFlags (CMPBconst x [c])) + // result: (InvertFlags (CMPBconst x [int64(int8(c))])) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } c := v_0.AuxInt @@ -2593,7 +2057,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) v0.AddArg(x) - v0.AuxInt = c + v0.AuxInt = int64(int8(c)) v.AddArg(v0) return true } @@ -2602,12 +2066,12 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)==int8(y) // result: (FlagEQ) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2618,12 +2082,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagEQ) return true } - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)<int8(y) && uint8(x)<uint8(y) // result: (FlagLT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2634,12 +2098,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)<int8(y) && uint8(x)>uint8(y) // result: (FlagLT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2650,12 +2114,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_UGT) return true } - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)>int8(y) && uint8(x)<uint8(y) // result: (FlagGT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2666,12 +2130,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_ULT) return true } - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)>int8(y) && uint8(x)>uint8(y) // result: (FlagGT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2682,12 +2146,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_UGT) return true } - // match: (CMPBconst (ANDBconst _ [m]) [n]) + // match: (CMPBconst (ANDLconst _ [m]) [n]) // cond: 0 <= int8(m) && int8(m) < int8(n) // result: (FlagLT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } m := v_0.AuxInt @@ -2698,12 +2162,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPBconst (ANDB x y) [0]) + // match: (CMPBconst (ANDL x y) [0]) // cond: // result: (TESTB x y) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDB { + if v_0.Op != OpAMD64ANDL { break } x := v_0.Args[0] @@ -2716,12 +2180,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.AddArg(y) return true } - // match: (CMPBconst (ANDBconst [c] x) [0]) + // match: (CMPBconst (ANDLconst [c] x) [0]) // cond: - // result: (TESTBconst [c] x) + // result: (TESTBconst [int64(int8(c))] x)
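The CMPB/CMPW rewrites above fold a MOVLconst into the compare, so the 32-bit constant payload has to be re-truncated to the compare's width; that is what the new [int64(int8(c))] (and, below, [int64(int16(c))]) AuxInts do. A minimal sketch of the invariant this relies on, in plain Go (illustrative only, not code from this patch):

	// An 8-bit compare only observes the low 8 bits of the constant,
	// so truncating the wider MOVLconst payload first cannot change
	// the outcome of the comparison.
	func cmpb(x int8, c int64) bool {
		return x == int8(c) // int8(c) == int8(int64(int8(c)))
	}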
for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -2730,7 +2194,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { break } v.reset(OpAMD64TESTBconst) - v.AuxInt = c + v.AuxInt = int64(int8(c)) v.AddArg(x) return true } @@ -3209,27 +2673,27 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPW x (MOVWconst [c])) + // match: (CMPW x (MOVLconst [c])) // cond: - // result: (CMPWconst x [c]) + // result: (CMPWconst x [int64(int16(c))]) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt v.reset(OpAMD64CMPWconst) v.AddArg(x) - v.AuxInt = c + v.AuxInt = int64(int16(c)) return true } - // match: (CMPW (MOVWconst [c]) x) + // match: (CMPW (MOVLconst [c]) x) // cond: - // result: (InvertFlags (CMPWconst x [c])) + // result: (InvertFlags (CMPWconst x [int64(int16(c))])) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } c := v_0.AuxInt @@ -3237,7 +2701,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) v0.AddArg(x) - v0.AuxInt = c + v0.AuxInt = int64(int16(c)) v.AddArg(v0) return true } @@ -3246,12 +2710,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)==int16(y) // result: (FlagEQ) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3262,12 +2726,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagEQ) return true } - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)<int16(y) && uint16(x)<uint16(y) // result: (FlagLT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3278,12 +2742,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)<int16(y) && uint16(x)>uint16(y) // result: (FlagLT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3294,12 +2758,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_UGT) return true } - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)>int16(y) && uint16(x)<uint16(y) // result: (FlagGT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3310,12 +2774,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_ULT) return true } - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)>int16(y) && uint16(x)>uint16(y) // result: (FlagGT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3326,12 +2790,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_UGT) return true } - // match: (CMPWconst (ANDWconst _ [m]) [n]) + // match: (CMPWconst (ANDLconst _ [m]) [n]) // cond: 0 <= int16(m) && int16(m) < int16(n) // result: (FlagLT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } m := v_0.AuxInt @@ -3342,12 +2806,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPWconst (ANDW x y) [0]) + // match: (CMPWconst (ANDL x y) [0]) // cond: // result: (TESTW x y) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDW { + if v_0.Op != OpAMD64ANDL { break } x := v_0.Args[0] @@ -3360,12 +2824,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config
*Config) bool { v.AddArg(y) return true } - // match: (CMPWconst (ANDWconst [c] x) [0]) + // match: (CMPWconst (ANDLconst [c] x) [0]) // cond: - // result: (TESTWconst [c] x) + // result: (TESTWconst [int64(int16(c))] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -3374,7 +2838,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { break } v.reset(OpAMD64TESTWconst) - v.AuxInt = c + v.AuxInt = int64(int16(c)) v.AddArg(x) return true } @@ -3418,10 +2882,10 @@ func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { _ = b // match: (Com16 x) // cond: - // result: (NOTW x) + // result: (NOTL x) for { x := v.Args[0] - v.reset(OpAMD64NOTW) + v.reset(OpAMD64NOTL) v.AddArg(x) return true } @@ -3460,10 +2924,10 @@ func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { _ = b // match: (Com8 x) // cond: - // result: (NOTB x) + // result: (NOTL x) for { x := v.Args[0] - v.reset(OpAMD64NOTB) + v.reset(OpAMD64NOTL) v.AddArg(x) return true } @@ -3474,10 +2938,10 @@ func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { _ = b // match: (Const16 [val]) // cond: - // result: (MOVWconst [val]) + // result: (MOVLconst [val]) for { val := v.AuxInt - v.reset(OpAMD64MOVWconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = val return true } @@ -3544,10 +3008,10 @@ func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { _ = b // match: (Const8 [val]) // cond: - // result: (MOVBconst [val]) + // result: (MOVLconst [val]) for { val := v.AuxInt - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = val return true } @@ -3558,10 +3022,10 @@ func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { _ = b // match: (ConstBool [b]) // cond: - // result: (MOVBconst [b]) + // result: (MOVLconst [b]) for { b := v.AuxInt - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = b return true } @@ -5955,20 +5419,20 @@ func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { _ = b // match: (Lsh16x16 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPWconst y [16]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -5980,20 +5444,20 @@ func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { _ = b // match: (Lsh16x32 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPLconst y [16]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6005,20 +5469,20 @@ func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { _ = b // match: (Lsh16x64 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPQconst y [16]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y 
[32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6030,20 +5494,20 @@ func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { _ = b // match: (Lsh16x8 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPBconst y [16]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6255,20 +5719,20 @@ func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { _ = b // match: (Lsh8x16 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPWconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6280,20 +5744,20 @@ func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { _ = b // match: (Lsh8x32 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPLconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6305,20 +5769,20 @@ func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { _ = b // match: (Lsh8x64 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPQconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6330,20 +5794,20 @@ func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { _ = b // match: (Lsh8x8 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPBconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) 
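The Lsh lowerings in this region switch the out-of-range test from the operand width (8 or 16) to 32 because the shift itself is now performed with a 32-bit SHLL. For counts between the narrow width and 31, SHLL already pushes every surviving bit above the narrow result, so truncation yields the correct zero; only counts of 32 and up, where the hardware would wrap the count, need the SBBLcarrymask to force zero. A rough Go model of the rewritten Lsh8 lowering (names are mine, not the compiler's):

	// lsh8 mimics (ANDL (SHLL x y) (SBBLcarrymask (CMPconst y [32]))).
	func lsh8(x uint8, s uint32) uint8 {
		wide := uint32(x) << (s & 31) // SHLL masks the count to 5 bits
		if s >= 32 {                  // SBBLcarrymask zeroes the result
			wide = 0
		}
		return uint8(wide) // for 8 <= s < 32 the low byte is already 0
	}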
v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6378,12 +5842,12 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVBQSX (ANDBconst [c] x)) + // match: (MOVBQSX (ANDLconst [c] x)) // cond: c & 0x80 == 0 - // result: (ANDQconst [c & 0x7f] x) + // result: (ANDLconst [c & 0x7f] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -6391,7 +5855,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { if !(c&0x80 == 0) { break } - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0x7f v.AddArg(x) return true @@ -6482,17 +5946,17 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVBQZX (ANDBconst [c] x)) + // match: (MOVBQZX (ANDLconst [c] x)) // cond: - // result: (ANDQconst [c & 0xff] x) + // result: (ANDLconst [c & 0xff] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt x := v_0.Args[0] - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0xff v.AddArg(x) return true @@ -6743,7 +6207,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) + // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) // cond: validOff(off) // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) for { @@ -6751,7 +6215,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt @@ -7334,7 +6798,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { } // match: (MOVLQSX (ANDLconst [c] x)) // cond: c & 0x80000000 == 0 - // result: (ANDQconst [c & 0x7fffffff] x) + // result: (ANDLconst [c & 0x7fffffff] x) for { v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { @@ -7345,7 +6809,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { if !(c&0x80000000 == 0) { break } - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0x7fffffff v.AddArg(x) return true @@ -7464,8 +6928,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { return true } // match: (MOVLQZX (ANDLconst [c] x)) - // cond: c & 0x80000000 == 0 - // result: (ANDQconst [c & 0x7fffffff] x) + // cond: + // result: (ANDLconst [c] x) for { v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { @@ -7473,11 +6937,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { } c := v_0.AuxInt x := v_0.Args[0] - if !(c&0x80000000 == 0) { - break - } - v.reset(OpAMD64ANDQconst) - v.AuxInt = c & 0x7fffffff + v.reset(OpAMD64ANDLconst) + v.AuxInt = c v.AddArg(x) return true } @@ -10635,12 +10096,12 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVWQSX (ANDWconst [c] x)) + // match: (MOVWQSX (ANDLconst [c] x)) // cond: c & 0x8000 == 0 - // result: (ANDQconst [c & 0x7fff] x) + // result: (ANDLconst [c & 0x7fff] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ 
-10648,7 +10109,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { if !(c&0x8000 == 0) { break } - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0x7fff v.AddArg(x) return true @@ -10766,17 +10227,17 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVWQZX (ANDWconst [c] x)) + // match: (MOVWQZX (ANDLconst [c] x)) // cond: - // result: (ANDQconst [c & 0xffff] x) + // result: (ANDLconst [c & 0xffff] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt x := v_0.Args[0] - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0xffff v.AddArg(x) return true @@ -11126,7 +10587,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) + // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) // cond: validOff(off) // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) for { @@ -11134,7 +10595,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt @@ -12037,60 +11498,6 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64MULB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MULB x (MOVBconst [c])) - // cond: - // result: (MULBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64MULBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULB (MOVBconst [c]) x) - // cond: - // result: (MULBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64MULBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MULBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [int64(int8(c*d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(c * d)) - return true - } - return false -} func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { b := v.Block _ = b @@ -12557,60 +11964,6 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64MULW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MULW x (MOVWconst [c])) - // cond: - // result: (MULWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64MULWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULW (MOVWconst [c]) x) - // cond: - // result: (MULWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64MULWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MULWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [int64(int16(c*d))]) - for { - c := v.AuxInt - 
v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = int64(int16(c * d)) - return true - } - return false -} func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { b := v.Block _ = b @@ -13106,11 +12459,11 @@ func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { _ = b // match: (Mul16 x y) // cond: - // result: (MULW x y) + // result: (MULL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64MULW) + v.reset(OpAMD64MULL) v.AddArg(x) v.AddArg(y) return true @@ -13186,35 +12539,17 @@ func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { _ = b // match: (Mul8 x y) // cond: - // result: (MULB x y) + // result: (MULL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64MULB) + v.reset(OpAMD64MULL) v.AddArg(x) v.AddArg(y) return true } return false } -func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NEGB (MOVBconst [c])) - // cond: - // result: (MOVBconst [int64(int8(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(-c)) - return true - } - return false -} func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { b := v.Block _ = b @@ -13251,42 +12586,6 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NEGW (MOVWconst [c])) - // cond: - // result: (MOVWconst [int64(int16(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = int64(int16(-c)) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64NOTB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NOTB (MOVBconst [c])) - // cond: - // result: (MOVBconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = ^c - return true - } - return false -} func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { b := v.Block _ = b @@ -13323,33 +12622,15 @@ func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64NOTW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NOTW (MOVWconst [c])) - // cond: - // result: (MOVWconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = ^c - return true - } - return false -} func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { b := v.Block _ = b // match: (Neg16 x) // cond: - // result: (NEGW x) + // result: (NEGL x) for { x := v.Args[0] - v.reset(OpAMD64NEGW) + v.reset(OpAMD64NEGL) v.AddArg(x) return true } @@ -13422,10 +12703,10 @@ func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { _ = b // match: (Neg8 x) // cond: - // result: (NEGB x) + // result: (NEGL x) for { x := v.Args[0] - v.reset(OpAMD64NEGB) + v.reset(OpAMD64NEGL) v.AddArg(x) return true } @@ -13578,109 +12859,16 @@ func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { _ = b // match: (Not x) // cond: - // result: (XORBconst [1] x) + // result: (XORLconst [1] x) for { x := v.Args[0] - v.reset(OpAMD64XORBconst) + v.reset(OpAMD64XORLconst) v.AuxInt = 1 v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64ORB(v *Value, config *Config) 
bool { - b := v.Block - _ = b - // match: (ORB x (MOVBconst [c])) - // cond: - // result: (ORBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ORBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORB (MOVBconst [c]) x) - // cond: - // result: (ORBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ORBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORB x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORBconst [c] x) - // cond: int8(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORBconst [c] _) - // cond: int8(c)==-1 - // result: (MOVBconst [-1]) - for { - c := v.AuxInt - if !(int8(c) == -1) { - break - } - v.reset(OpAMD64MOVBconst) - v.AuxInt = -1 - return true - } - // match: (ORBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [c|d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = c | d - return true - } - return false -} func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { b := v.Block _ = b @@ -13727,6 +12915,54 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) + for { + x0 := v.Args[0] + if x0.Op != OpAMD64MOVBload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } // match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) @@ -13829,6 +13065,59 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { v0.AddArg(mem) return true } + // match: (ORL 
x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) + for { + x0 := v.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) @@ -14529,200 +13818,6 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORW x (MOVWconst [c])) - // cond: - // result: (ORWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW (MOVWconst [c]) x) - // cond: - // result: (ORWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORW x0:(MOVBload [i] {s} p mem) s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLWconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - if x1.AuxInt != i+1 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } 
- b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLWconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - if x1.AuxInt != i+1 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORWconst [c] x) - // cond: int16(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORWconst [c] _) - // cond: int16(c)==-1 - // result: (MOVWconst [-1]) - for { - c := v.AuxInt - if !(int16(c) == -1) { - break - } - v.reset(OpAMD64MOVWconst) - v.AuxInt = -1 - return true - } - // match: (ORWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [c|d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = c | d - return true - } - return false -} func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { b := v.Block _ = b @@ -14760,11 +13855,11 @@ func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { _ = b // match: (Or16 x y) // cond: - // result: (ORW x y) + // result: (ORL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ORW) + v.reset(OpAMD64ORL) v.AddArg(x) v.AddArg(y) return true @@ -14808,11 +13903,11 @@ func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { _ = b // match: (Or8 x y) // cond: - // result: (ORB x y) + // result: (ORL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ORB) + v.reset(OpAMD64ORL) v.AddArg(x) v.AddArg(y) return true @@ -14824,12 +13919,12 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { _ = b // match: (Rsh16Ux16 x y) // cond: - // result: (ANDW (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) v0.AddArg(x) v0.AddArg(y) @@ -14849,12 +13944,12 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { _ = b // match: (Rsh16Ux32 x y) // cond: - // result: (ANDW (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { t := 
 		v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDW)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -14874,12 +13969,12 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh16Ux64 x y)
 	// cond:
-	// result: (ANDW (SHRW x y) (SBBLcarrymask (CMPQconst y [16])))
+	// result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16])))
 	for {
 		t := v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDW)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -14899,12 +13994,12 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh16Ux8 x y)
 	// cond:
-	// result: (ANDW (SHRW x y) (SBBLcarrymask (CMPBconst y [16])))
+	// result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16])))
 	for {
 		t := v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDW)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -14924,7 +14019,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh16x16 x y)
 	// cond:
-	// result: (SARW x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [16])))))
+	// result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -14932,7 +14027,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARW)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15008,7 +14103,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh16x8 x y)
 	// cond:
-	// result: (SARW x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [16])))))
+	// result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15016,7 +14111,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARW)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15136,7 +14231,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh32x16 x y)
 	// cond:
-	// result: (SARL x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [32])))))
+	// result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15144,7 +14239,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARL)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15220,7 +14315,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh32x8 x y)
 	// cond:
-	// result: (SARL x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [32])))))
+	// result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15228,7 +14323,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARL)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15348,7 +14443,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh64x16 x y)
 	// cond:
-	// result: (SARQ x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [64])))))
+	// result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15356,7 +14451,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARQ)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15432,7 +14527,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh64x8 x y)
 	// cond:
-	// result: (SARQ x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [64])))))
+	// result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15440,7 +14535,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARQ)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15460,12 +14555,12 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh8Ux16 x y)
 	// cond:
-	// result: (ANDB (SHRB x y) (SBBLcarrymask (CMPWconst y [8])))
+	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8])))
 	for {
 		t := v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDB)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -15485,12 +14580,12 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh8Ux32 x y)
 	// cond:
-	// result: (ANDB (SHRB x y) (SBBLcarrymask (CMPLconst y [8])))
+	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8])))
 	for {
 		t := v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDB)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -15510,12 +14605,12 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh8Ux64 x y)
 	// cond:
-	// result: (ANDB (SHRB x y) (SBBLcarrymask (CMPQconst y [8])))
+	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8])))
 	for {
 		t := v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDB)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -15535,12 +14630,12 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh8Ux8 x y)
 	// cond:
-	// result: (ANDB (SHRB x y) (SBBLcarrymask (CMPBconst y [8])))
+	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8])))
 	for {
 		t := v.Type
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64ANDB)
+		v.reset(OpAMD64ANDL)
 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
 		v0.AddArg(x)
 		v0.AddArg(y)
@@ -15560,7 +14655,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh8x16 x y)
 	// cond:
-	// result: (SARB x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [8])))))
+	// result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15568,7 +14663,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARB)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15644,7 +14739,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Rsh8x8 x y)
 	// cond:
-	// result: (SARB x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [8])))))
+	// result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8])))))
 	for {
 		t := v.Type
 		x := v.Args[0]
@@ -15652,7 +14747,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
 		v.reset(OpAMD64SARB)
 		v.Type = t
 		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type)
+		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
 		v0.AddArg(y)
 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
@@ -15700,54 +14795,6 @@ func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SARB x (MOVWconst [c]))
-	// cond:
-	// result: (SARBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARB x (MOVBconst [c]))
-	// cond:
-	// result: (SARBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARB x (ANDBconst [31] y))
-	// cond:
-	// result: (SARB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDBconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SARB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
 	return false
 }
 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
@@ -15802,36 +14849,6 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SARL x (MOVWconst [c]))
-	// cond:
-	// result: (SARLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARL x (MOVBconst [c]))
-	// cond:
-	// result: (SARLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
 	// match: (SARL x (ANDLconst [31] y))
 	// cond:
 	// result: (SARL x y)
@@ -15904,36 +14921,6 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SARQ x (MOVWconst [c]))
-	// cond:
-	// result: (SARQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARQ x (MOVBconst [c]))
-	// cond:
-	// result: (SARQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
 	// match: (SARQ x (ANDQconst [63] y))
 	// cond:
 	// result: (SARQ x y)
@@ -16006,54 +14993,6 @@ func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SARW x (MOVWconst [c]))
-	// cond:
-	// result: (SARWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARW x (MOVBconst [c]))
-	// cond:
-	// result: (SARWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARW x (ANDWconst [31] y))
-	// cond:
-	// result: (SARW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDWconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SARW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
 	return false
 }
 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
@@ -16223,61 +15162,61 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
 	}
 	// match: (SETA (FlagEQ))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETA (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETA (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETA (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETA (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
@@ -16301,61 +15240,61 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
 	}
 	// match: (SETAE (FlagEQ))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETAE (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETAE (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETAE (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETAE (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
@@ -16379,61 +15318,61 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
 	}
 	// match: (SETB (FlagEQ))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETB (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETB (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETB (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETB (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
@@ -16457,61 +15396,61 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
 	}
 	// match: (SETBE (FlagEQ))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETBE (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETBE (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETBE (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETBE (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
@@ -16535,61 +15474,61 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
 	}
 	// match: (SETEQ (FlagEQ))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETEQ (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETEQ (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETEQ (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETEQ (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
@@ -16613,61 +15552,61 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
 	}
 	// match: (SETG (FlagEQ))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETG (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETG (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETG (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETG (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
@@ -16691,61 +15630,61 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
 	}
 	// match: (SETGE (FlagEQ))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETGE (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETGE (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETGE (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETGE (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
@@ -16769,61 +15708,61 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
 	}
 	// match: (SETL (FlagEQ))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETL (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETL (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETL (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETL (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
@@ -16847,61 +15786,61 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
 	}
 	// match: (SETLE (FlagEQ))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETLE (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETLE (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETLE (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETLE (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
@@ -16925,149 +15864,66 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
 	}
 	// match: (SETNE (FlagEQ))
 	// cond:
-	// result: (MOVBconst [0])
+	// result: (MOVLconst [0])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagEQ {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 0
 		return true
 	}
 	// match: (SETNE (FlagLT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETNE (FlagLT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagLT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETNE (FlagGT_ULT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_ULT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	// match: (SETNE (FlagGT_UGT))
 	// cond:
-	// result: (MOVBconst [1])
+	// result: (MOVLconst [1])
 	for {
 		v_0 := v.Args[0]
 		if v_0.Op != OpAMD64FlagGT_UGT {
 			break
 		}
-		v.reset(OpAMD64MOVBconst)
+		v.reset(OpAMD64MOVLconst)
 		v.AuxInt = 1
 		return true
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHLB x (MOVQconst [c]))
-	// cond:
-	// result: (SHLBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLB x (MOVLconst [c]))
-	// cond:
-	// result: (SHLBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLB x (MOVWconst [c]))
-	// cond:
-	// result: (SHLBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLB x (MOVBconst [c]))
-	// cond:
-	// result: (SHLBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLB x (ANDBconst [31] y))
-	// cond:
-	// result: (SHLB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDBconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHLB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
@@ -17101,36 +15957,6 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SHLL x (MOVWconst [c]))
-	// cond:
-	// result: (SHLLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLL x (MOVBconst [c]))
-	// cond:
-	// result: (SHLLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
 	// match: (SHLL x (ANDLconst [31] y))
 	// cond:
 	// result: (SHLL x y)
@@ -17184,36 +16010,6 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SHLQ x (MOVWconst [c]))
-	// cond:
-	// result: (SHLQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLQ x (MOVBconst [c]))
-	// cond:
-	// result: (SHLQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
 	// match: (SHLQ x (ANDQconst [63] y))
 	// cond:
 	// result: (SHLQ x y)
@@ -17234,89 +16030,6 @@
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHLW x (MOVQconst [c]))
-	// cond:
-	// result: (SHLWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLW x (MOVLconst [c]))
-	// cond:
-	// result: (SHLWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLW x (MOVWconst [c]))
-	// cond:
-	// result: (SHLWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLW x (MOVBconst [c]))
-	// cond:
-	// result: (SHLWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLW x (ANDWconst [31] y))
-	// cond:
-	// result: (SHLW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDWconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
@@ -17350,54 +16063,6 @@ func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SHRB x (MOVWconst [c]))
-	// cond:
-	// result: (SHRBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRB x (MOVBconst [c]))
-	// cond:
-	// result: (SHRBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRB x (ANDBconst [31] y))
-	// cond:
-	// result: (SHRB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDBconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHRB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
 	return false
 }
 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
@@ -17433,36 +16098,6 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SHRL x (MOVWconst [c]))
-	// cond:
-	// result: (SHRLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRL x (MOVBconst [c]))
-	// cond:
-	// result: (SHRLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
 	// match: (SHRL x (ANDLconst [31] y))
 	// cond:
 	// result: (SHRL x y)
@@ -17516,36 +16151,6 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SHRQ x (MOVWconst [c]))
-	// cond:
-	// result: (SHRQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRQ x (MOVBconst [c]))
-	// cond:
-	// result: (SHRQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
 	// match: (SHRQ x (ANDQconst [63] y))
 	// cond:
 	// result: (SHRQ x y)
@@ -17599,163 +16204,6 @@ func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (SHRW x (MOVWconst [c]))
-	// cond:
-	// result: (SHRWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRW x (MOVBconst [c]))
-	// cond:
-	// result: (SHRWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRW x (ANDWconst [31] y))
-	// cond:
-	// result: (SHRW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDWconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHRW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SUBB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBB x (MOVBconst [c]))
-	// cond:
-	// result: (SUBBconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SUBBconst)
-		v.AddArg(x)
-		v.AuxInt = c
-		return true
-	}
-	// match: (SUBB (MOVBconst [c]) x)
-	// cond:
-	// result: (NEGB (SUBBconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64NEGB)
-		v0 := b.NewValue0(v.Line, OpAMD64SUBBconst, v.Type)
-		v0.AddArg(x)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBB x x)
-	// cond:
-	// result: (MOVBconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVBconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBBconst [c] x)
-	// cond: int8(c) == 0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int8(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBBconst [c] x)
-	// cond:
-	// result: (ADDBconst [int64(int8(-c))] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ADDBconst)
-		v.AuxInt = int64(int8(-c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBBconst (MOVBconst [d]) [c])
-	// cond:
-	// result: (MOVBconst [int64(int8(d-c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVBconst {
-			break
-		}
-		d := v_0.AuxInt
-		c := v.AuxInt
-		v.reset(OpAMD64MOVBconst)
-		v.AuxInt = int64(int8(d - c))
-		return true
-	}
-	// match: (SUBBconst (SUBBconst x [d]) [c])
-	// cond:
-	// result: (ADDBconst [int64(int8(-c-d))] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SUBBconst {
-			break
-		}
-		x := v_0.Args[0]
-		d := v_0.AuxInt
-		c := v.AuxInt
-		v.reset(OpAMD64ADDBconst)
-		v.AuxInt = int64(int8(-c - d))
-		v.AddArg(x)
-		return true
-	}
 	return false
 }
 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
@@ -17987,115 +16435,6 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64SUBW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBW x (MOVWconst [c]))
-	// cond:
-	// result: (SUBWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SUBWconst)
-		v.AddArg(x)
-		v.AuxInt = c
-		return true
-	}
-	// match: (SUBW (MOVWconst [c]) x)
-	// cond:
-	// result: (NEGW (SUBWconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64NEGW)
-		v0 := b.NewValue0(v.Line, OpAMD64SUBWconst, v.Type)
-		v0.AddArg(x)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBW x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBWconst [c] x)
-	// cond: int16(c) == 0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int16(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBWconst [c] x)
-	// cond:
-	// result: (ADDWconst [int64(int16(-c))] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ADDWconst)
-		v.AuxInt = int64(int16(-c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBWconst (MOVWconst [d]) [c])
-	// cond:
-	// result: (MOVWconst [int64(int16(d-c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		c := v.AuxInt
-		v.reset(OpAMD64MOVWconst)
-		v.AuxInt = int64(int16(d - c))
-		return true
-	}
-	// match: (SUBWconst (SUBWconst x [d]) [c])
-	// cond:
-	// result: (ADDWconst [int64(int16(-c-d))] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SUBWconst {
-			break
-		}
-		x := v_0.Args[0]
-		d := v_0.AuxInt
-		c := v.AuxInt
-		v.reset(OpAMD64ADDWconst)
-		v.AuxInt = int64(int16(-c - d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
@@ -18324,11 +16663,11 @@ func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
 	_ = b
 	// match: (Sub16 x y)
 	// cond:
-	// result: (SUBW x y)
+	// result: (SUBL x y)
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64SUBW)
+		v.reset(OpAMD64SUBL)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
@@ -18404,11 +16743,11 @@ func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Sub8 x y)
 	// cond:
-	// result: (SUBB x y)
+	// result: (SUBL x y)
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64SUBB)
+		v.reset(OpAMD64SUBL)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
@@ -18521,86 +16860,6 @@ func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64XORB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORB x (MOVBconst [c]))
-	// cond:
-	// result: (XORBconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64XORBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORB (MOVBconst [c]) x)
-	// cond:
-	// result: (XORBconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVBconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64XORBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORB x x)
-	// cond:
-	// result: (MOVBconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVBconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XORBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORBconst [c] x)
-	// cond: int8(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int8(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORBconst [c] (MOVBconst [d]))
-	// cond:
-	// result: (MOVBconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVBconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVBconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
@@ -18766,96 +17025,16 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
 	}
 	return false
 }
-func rewriteValueAMD64_OpAMD64XORW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORW x (MOVWconst [c]))
-	// cond:
-	// result: (XORWconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64XORWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORW (MOVWconst [c]) x)
-	// cond:
-	// result: (XORWconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64XORWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORW x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XORWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORWconst [c] x)
-	// cond: int16(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int16(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORWconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVWconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
 	// match: (Xor16 x y)
 	// cond:
-	// result: (XORW x y)
+	// result: (XORL x y)
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64XORW)
+		v.reset(OpAMD64XORL)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
@@ -18899,11 +17078,11 @@ func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
 	_ = b
 	// match: (Xor8 x y)
 	// cond:
-	// result: (XORB x y)
+	// result: (XORL x y)
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
-		v.reset(OpAMD64XORB)
+		v.reset(OpAMD64XORL)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
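
Note on the Rsh*Ux* hunks above: the branch-free lowering of Go's oversized-shift semantics is unchanged; only the final mask combine widens from ANDB/ANDW to ANDL. A Go-level sketch (illustrative only, not compiler code) of what (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) computes:

package main

import "fmt"

// rsh8ux64 mirrors the lowering of (Rsh8Ux64 x y): SHRB performs the raw
// shift (the hardware masks the count to 5 bits), SBBLcarrymask materializes
// -1 or 0 from the carry of the unsigned compare y < 8, and the final ANDL
// zeroes the result whenever y >= 8, which is what Go's spec requires.
func rsh8ux64(x uint8, y uint64) uint8 {
	raw := uint32(x) >> (y & 31) // SHRB-style shift, count taken mod 32
	var mask uint32
	if y < 8 { // SBBLcarrymask: all ones iff the compare set the carry flag
		mask = ^uint32(0)
	}
	return uint8(raw & mask) // ANDL; only the low 8 bits survive
}

func main() {
	fmt.Println(rsh8ux64(0x80, 3))  // 16
	fmt.Println(rsh8ux64(0x80, 9))  // 0: oversized shift yields 0
	fmt.Println(rsh8ux64(0x80, 64)) // 0
}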
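The signed Rsh*x* hunks keep the count-saturation trick and only widen the saturating OR from ORW/ORB to ORL: an in-range count passes through, and an out-of-range count becomes all ones, so the arithmetic shift produces pure sign fill. A sketch under the same assumptions (illustrative only):

package main

import "fmt"

// rsh16x16 mirrors (Rsh16x16 x y). Signed shifts cannot mask the count the
// way unsigned ones mask the result, so the lowering saturates it instead:
// for y >= 16 the ORL sets every count bit and the SARW-style shift (count
// taken mod 32) shifts the sign-extended value by 31, leaving only sign bits.
func rsh16x16(x int16, y uint16) int16 {
	count := uint32(y)
	if y >= 16 { // NOTL(SBBLcarrymask(CMPWconst y [16])) is ^0 exactly here
		count |= ^uint32(0) // ORL: saturate the shift count
	}
	return int16(int32(x) >> (count & 31)) // arithmetic shift, sign-extended
}

func main() {
	fmt.Println(rsh16x16(-32768, 3))   // -4096
	fmt.Println(rsh16x16(-32768, 100)) // -1: all sign bits
	fmt.Println(rsh16x16(12345, 100))  // 0
}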
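The long runs of SETcc hunks change only the materialized constant (MOVBconst becomes MOVLconst); the folding itself is untouched: each Flag* op records a statically known comparison outcome, so a SETcc of one is a compile-time 0 or 1. A compressed sketch of that lattice (the flag struct and helpers here are hypothetical, chosen for illustration):

package main

import "fmt"

// flag is a hypothetical compact encoding of a statically known comparison:
// eq reports equality, slt the signed order, ult the unsigned order.
type flag struct{ eq, slt, ult bool }

var (
	flagEQ     = flag{eq: true}
	flagLT_ULT = flag{slt: true, ult: true}
	flagLT_UGT = flag{slt: true}
	flagGT_ULT = flag{ult: true}
	flagGT_UGT = flag{}
)

// foldSETA folds (SETA f): "above" is strict unsigned greater-than.
func foldSETA(f flag) int64 {
	if !f.eq && !f.ult {
		return 1 // e.g. (SETA (FlagLT_UGT)) -> (MOVLconst [1])
	}
	return 0
}

// foldSETG folds (SETG f): strict signed greater-than.
func foldSETG(f flag) int64 {
	if !f.eq && !f.slt {
		return 1 // e.g. (SETG (FlagGT_ULT)) -> (MOVLconst [1])
	}
	return 0
}

func main() {
	fmt.Println(foldSETA(flagLT_UGT), foldSETA(flagLT_ULT)) // 1 0
	fmt.Println(foldSETG(flagGT_ULT), foldSETG(flagEQ))     // 1 0
}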
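Finally, the wholesale deletions (SHLB, SHLW, SARW's and SHRW's constant matches, SUBB/SUBBconst, SUBW/SUBWconst, XORB/XORBconst, XORW/XORWconst) remove rules whose constant folds re-truncated through int8/int16 on every step, for example (SUBWconst (MOVWconst [d]) [c]) -> (MOVWconst [int64(int16(d-c))]). With 8- and 16-bit arithmetic now carried out on 32-bit ops, the corresponding L-width folds take over and the upper bits are simply ignored until a truncating use. A small illustrative sketch of the truncation the removed rule performed (foldSUBWconst is a name invented here):

package main

import "fmt"

// foldSUBWconst reproduces the removed rule's constant fold: subtract, then
// re-truncate through int16 so the AuxInt stays in canonical 16-bit form.
func foldSUBWconst(d, c int64) int64 {
	return int64(int16(d - c))
}

func main() {
	fmt.Println(foldSUBWconst(10, 20))    // -10
	fmt.Println(foldSUBWconst(-32768, 1)) // 32767: wraps at 16 bits
}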