1
0
mirror of https://github.com/golang/go synced 2024-09-30 12:18:33 -06:00

cmd/compile: rewrite some AMD64 rules to use typed aux fields

Surprisingly many rules needed no modification.

Use wrapper functions for aux like we did for auxint.
Simplifies things a bit.

Change-Id: I2e852e77f1585dcb306a976ab9335f1ac5b4a770
Reviewed-on: https://go-review.googlesource.com/c/go/+/227961
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Michael Munday <mike.munday@ibm.com>
This commit is contained in:
Keith Randall 2020-04-11 19:51:09 -07:00
parent 7580937524
commit 2f84caebe3
5 changed files with 336 additions and 519 deletions

View File

@ -3,113 +3,114 @@
// license that can be found in the LICENSE file.
// Lowering arithmetic
(Add(64|32|16|8) ...) -> (ADD(Q|L|L|L) ...)
(AddPtr ...) -> (ADDQ ...)
(Add(32|64)F ...) -> (ADDS(S|D) ...)
(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
(AddPtr ...) => (ADDQ ...)
(Add(32|64)F ...) => (ADDS(S|D) ...)
(Sub(64|32|16|8) ...) -> (SUB(Q|L|L|L) ...)
(SubPtr ...) -> (SUBQ ...)
(Sub(32|64)F ...) -> (SUBS(S|D) ...)
(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
(SubPtr ...) => (SUBQ ...)
(Sub(32|64)F ...) => (SUBS(S|D) ...)
(Mul(64|32|16|8) ...) -> (MUL(Q|L|L|L) ...)
(Mul(32|64)F ...) -> (MULS(S|D) ...)
(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
(Mul(32|64)F ...) => (MULS(S|D) ...)
(Select0 (Mul64uover x y)) -> (Select0 <typ.UInt64> (MULQU x y))
(Select0 (Mul32uover x y)) -> (Select0 <typ.UInt32> (MULLU x y))
(Select1 (Mul(64|32)uover x y)) -> (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
(Hmul(64|32) ...) -> (HMUL(Q|L) ...)
(Hmul(64|32)u ...) -> (HMUL(Q|L)U ...)
(Hmul(64|32) ...) => (HMUL(Q|L) ...)
(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)
(Div(64|32|16) [a] x y) -> (Select0 (DIV(Q|L|W) [a] x y))
(Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) -> (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Div(32|64)F ...) -> (DIVS(S|D) ...)
(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Div(32|64)F ...) => (DIVS(S|D) ...)
(Select0 (Add64carry x y c)) ->
(Select0 (Add64carry x y c)) =>
(Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Add64carry x y c)) ->
(Select1 (Add64carry x y c)) =>
(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
(Select0 (Sub64borrow x y c)) ->
(Select0 (Sub64borrow x y c)) =>
(Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Sub64borrow x y c)) ->
(Select1 (Sub64borrow x y c)) =>
(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
// Optimize ADCQ and friends
(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) -> (ADCQconst x [c] carry)
(ADCQ x y (FlagEQ)) -> (ADDQcarry x y)
(ADCQconst x [c] (FlagEQ)) -> (ADDQconstcarry x [c])
(ADDQcarry x (MOVQconst [c])) && is32Bit(c) -> (ADDQconstcarry x [c])
(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) -> (SBBQconst x [c] borrow)
(SBBQ x y (FlagEQ)) -> (SUBQborrow x y)
(SBBQconst x [c] (FlagEQ)) -> (SUBQconstborrow x [c])
(SUBQborrow x (MOVQconst [c])) && is32Bit(c) -> (SUBQconstborrow x [c])
(Select1 (NEGLflags (MOVQconst [0]))) -> (FlagEQ)
(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) -> x
(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x
(Mul64uhilo ...) -> (MULQU2 ...)
(Div128u ...) -> (DIVQU2 ...)
(Mul64uhilo ...) => (MULQU2 ...)
(Div128u ...) => (DIVQU2 ...)
(Avg64u ...) -> (AVGQU ...)
(Avg64u ...) => (AVGQU ...)
(Mod(64|32|16) [a] x y) -> (Select1 (DIV(Q|L|W) [a] x y))
(Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) -> (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(And(64|32|16|8) ...) -> (AND(Q|L|L|L) ...)
(Or(64|32|16|8) ...) -> (OR(Q|L|L|L) ...)
(Xor(64|32|16|8) ...) -> (XOR(Q|L|L|L) ...)
(Com(64|32|16|8) ...) -> (NOT(Q|L|L|L) ...)
(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)
(Neg(64|32|16|8) ...) -> (NEG(Q|L|L|L) ...)
(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
// Lowering boolean ops
(AndB ...) -> (ANDL ...)
(OrB ...) -> (ORL ...)
(Not x) -> (XORLconst [1] x)
(AndB ...) => (ANDL ...)
(OrB ...) => (ORL ...)
(Not x) => (XORLconst [1] x)
// Lowering pointer arithmetic
(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)
// Lowering other arithmetic
(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x) -> (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
(Ctz16 x) -> (BSFL (BTSLconst <typ.UInt32> [16] x))
(Ctz8 x) -> (BSFL (BTSLconst <typ.UInt32> [ 8] x))
(Ctz64 <t> x) => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x) => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
(Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x))
(Ctz8 x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x))
(Ctz64NonZero x) -> (Select0 (BSFQ x))
(Ctz32NonZero ...) -> (BSFL ...)
(Ctz16NonZero ...) -> (BSFL ...)
(Ctz8NonZero ...) -> (BSFL ...)
(Ctz64NonZero x) => (Select0 (BSFQ x))
(Ctz32NonZero ...) => (BSFL ...)
(Ctz16NonZero ...) => (BSFL ...)
(Ctz8NonZero ...) => (BSFL ...)
// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
// However, for zero-extended values, we can cheat a bit, and calculate
// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
// places the index of the highest set bit where we want it.
(BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
(BitLen32 x) -> (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
(BitLen16 x) -> (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
(BitLen8 x) -> (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
(BitLen64 <t> x) => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
(BitLen32 x) => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
(BitLen16 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
(BitLen8 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
(Bswap(64|32) ...) -> (BSWAP(Q|L) ...)
(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
(PopCount(64|32) ...) -> (POPCNT(Q|L) ...)
(PopCount16 x) -> (POPCNTL (MOVWQZX <typ.UInt32> x))
(PopCount8 x) -> (POPCNTL (MOVBQZX <typ.UInt32> x))
(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))
(Sqrt ...) -> (SQRTSD ...)
(Sqrt ...) => (SQRTSD ...)
(RoundToEven x) -> (ROUNDSD [0] x)
(Floor x) -> (ROUNDSD [1] x)
(Ceil x) -> (ROUNDSD [2] x)
(Trunc x) -> (ROUNDSD [3] x)
(FMA x y z) -> (VFMADD231SD z x y)
(RoundToEven x) => (ROUNDSD [0] x)
(Floor x) => (ROUNDSD [1] x)
(Ceil x) => (ROUNDSD [2] x)
(Trunc x) => (ROUNDSD [3] x)
(FMA x y z) => (VFMADD231SD z x y)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.

View File

@ -671,10 +671,6 @@ func fprint(w io.Writer, n Node) {
fmt.Fprintf(w, "%s := ", n.name)
fprint(w, n.value)
fmt.Fprintln(w)
case *Declare2:
fmt.Fprintf(w, "%s, %s := ", n.name1, n.name2)
fprint(w, n.value)
fmt.Fprintln(w)
case *CondBreak:
fmt.Fprintf(w, "if ")
fprint(w, n.expr)
@ -746,7 +742,7 @@ var predeclared = map[string]bool{
"true": true,
}
// declared reports if the body contains a Declare or Declare2 with the given name.
// declared reports if the body contains a Declare with the given name.
func (w *bodyBase) declared(name string) bool {
if predeclared[name] {
// Treat predeclared names as having already been declared.
@ -758,9 +754,6 @@ func (w *bodyBase) declared(name string) bool {
if decl, ok := s.(*Declare); ok && decl.name == name {
return true
}
if decl, ok := s.(*Declare2); ok && (decl.name1 == name || decl.name2 == name) {
return true
}
}
return false
}
@ -810,10 +803,6 @@ type (
name string
value ast.Expr
}
Declare2 struct {
name1, name2 string
value ast.Expr
}
// TODO: implement CondBreak as If + Break instead?
CondBreak struct {
expr ast.Expr
@ -855,12 +844,6 @@ func declf(name, format string, a ...interface{}) *Declare {
return &Declare{name, exprf(format, a...)}
}
// decl2f constructs a simple "name1, name2 := value" declaration, using exprf for its
// value.
func decl2f(name1, name2, format string, a ...interface{}) *Declare2 {
return &Declare2{name1, name2, exprf(format, a...)}
}
// breakf constructs a simple "if cond { break }" statement, using exprf for its
// condition.
func breakf(format string, a ...interface{}) *CondBreak {
@ -1051,11 +1034,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
if !token.IsIdentifier(e.name) || rr.declared(e.name) {
switch e.field {
case "Aux":
rr.add(&If{
expr: exprf("%s.%s == nil", v, e.field),
stmt: breakf("%s == nil", e.name),
alt: breakf("%s.%s.(%s) == %s", v, e.field, e.dclType, e.name),
})
rr.add(breakf("auxTo%s(%s.%s) != %s", strings.Title(e.dclType), v, e.field, e.name))
case "AuxInt":
rr.add(breakf("auxIntTo%s(%s.%s) != %s", strings.Title(e.dclType), v, e.field, e.name))
case "Type":
@ -1064,12 +1043,7 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
} else {
switch e.field {
case "Aux":
if e.dclType == "Sym" {
// TODO: kind of a hack - allows nil interface through
rr.add(decl2f(e.name, "_", "%s.Aux.(Sym)", v))
} else {
rr.add(declf(e.name, "%s.%s.(%s)", v, e.field, e.dclType))
}
rr.add(declf(e.name, "auxTo%s(%s.%s)", strings.Title(e.dclType), v, e.field))
case "AuxInt":
rr.add(declf(e.name, "auxIntTo%s(%s.%s)", strings.Title(e.dclType), v, e.field))
case "Type":
@ -1237,8 +1211,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
if auxint != "" {
if rr.typed {
// Make sure auxint value has the right type.
rr.add(stmtf("var _auxint %s = %s", op.auxIntType(), auxint))
rr.add(stmtf("%s.AuxInt = %sToAuxInt(_auxint)", v, op.auxIntType()))
rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
} else {
rr.add(stmtf("%s.AuxInt = %s", v, auxint))
}
@ -1246,8 +1219,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
if aux != "" {
if rr.typed {
// Make sure aux value has the right type.
rr.add(stmtf("var _aux %s = %s", op.auxType(), aux))
rr.add(stmtf("%s.Aux = _aux", v))
rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
} else {
rr.add(stmtf("%s.Aux = %s", v, aux))
}
@ -1807,3 +1779,7 @@ func (op opData) auxIntType() string {
return "invalid"
}
}
// unTitle lowercases the first letter of s, converting an exported-style
// type name like "Int32" or "ValAndOff" into the "int32"/"valAndOff" form
// used to build aux conversion-function names (e.g. int32ToAuxInt).
// The names passed in are plain ASCII identifiers, so byte slicing is safe.
// An empty string is returned unchanged rather than panicking.
func unTitle(s string) string {
	if s == "" {
		return ""
	}
	return strings.ToLower(s[:1]) + s[1:]
}

View File

@ -593,10 +593,26 @@ func float32ToAuxInt(f float32) int64 {
// float64ToAuxInt encodes f as its IEEE-754 bit pattern, the int64
// representation used by a Value's AuxInt field.
func float64ToAuxInt(f float64) int64 {
	bits := math.Float64bits(f)
	return int64(bits)
}
func ValAndOffToAuxInt(v ValAndOff) int64 {
// valAndOffToAuxInt encodes a ValAndOff for storage in a Value's AuxInt field.
func valAndOffToAuxInt(v ValAndOff) int64 {
	enc := int64(v)
	return enc
}
// auxToString extracts the string stored in an Aux field.
// It panics if the Aux value is not a string.
func auxToString(i interface{}) string {
	s := i.(string)
	return s
}
// auxToSym extracts the Sym stored in an Aux field.
// TODO: kind of a hack - a nil interface (no aux set) yields the zero Sym
// instead of panicking, so rules matching symbol-less values still work.
func auxToSym(i interface{}) Sym {
	if s, ok := i.(Sym); ok {
		return s
	}
	var zero Sym
	return zero
}
// stringToAux boxes a string for storage in a Value's Aux field.
func stringToAux(s string) interface{} {
	var aux interface{} = s
	return aux
}
// symToAux boxes a Sym for storage in a Value's Aux field.
func symToAux(s Sym) interface{} {
	var aux interface{} = s
	return aux
}
// uaddOvf reports whether unsigned a+b would overflow.
func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)

View File

@ -1187,20 +1187,20 @@ func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
v_0 := v.Args[0]
// match: (ADCQ x (MOVQconst [c]) carry)
// cond: is32Bit(c)
// result: (ADCQconst x [c] carry)
// result: (ADCQconst x [int32(c)] carry)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
continue
}
c := v_1.AuxInt
c := auxIntToInt64(v_1.AuxInt)
carry := v_2
if !(is32Bit(c)) {
continue
}
v.reset(OpAMD64ADCQconst)
v.AuxInt = c
v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(x, carry)
return true
}
@ -1226,13 +1226,13 @@ func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
// match: (ADCQconst x [c] (FlagEQ))
// result: (ADDQconstcarry x [c])
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
x := v_0
if v_1.Op != OpAMD64FlagEQ {
break
}
v.reset(OpAMD64ADDQconstcarry)
v.AuxInt = c
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
@ -2097,19 +2097,19 @@ func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
v_0 := v.Args[0]
// match: (ADDQcarry x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ADDQconstcarry x [c])
// result: (ADDQconstcarry x [int32(c)])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
continue
}
c := v_1.AuxInt
c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
continue
}
v.reset(OpAMD64ADDQconstcarry)
v.AuxInt = c
v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
@ -6946,7 +6946,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
break
}
off := auxIntToInt32(l.AuxInt)
sym, _ := l.Aux.(Sym)
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(l.Uses == 1 && clobber(l)) {
@ -6955,10 +6955,8 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
v.copyOf(v0)
var _auxint ValAndOff = makeValAndOff32(int32(c), off)
v0.AuxInt = ValAndOffToAuxInt(_auxint)
var _aux Sym = sym
v0.Aux = _aux
v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
@ -7333,7 +7331,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
break
}
off := auxIntToInt32(l.AuxInt)
sym, _ := l.Aux.(Sym)
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(l.Uses == 1 && clobber(l)) {
@ -7342,10 +7340,8 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
v.copyOf(v0)
var _auxint ValAndOff = makeValAndOff32(c, off)
v0.AuxInt = ValAndOffToAuxInt(_auxint)
var _aux Sym = sym
v0.Aux = _aux
v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
@ -7900,7 +7896,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
break
}
off := auxIntToInt32(l.AuxInt)
sym, _ := l.Aux.(Sym)
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(l.Uses == 1 && clobber(l)) {
@ -7909,10 +7905,8 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
v.copyOf(v0)
var _auxint ValAndOff = makeValAndOff32(c, off)
v0.AuxInt = ValAndOffToAuxInt(_auxint)
var _aux Sym = sym
v0.Aux = _aux
v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
@ -8272,7 +8266,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
break
}
off := auxIntToInt32(l.AuxInt)
sym, _ := l.Aux.(Sym)
sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(l.Uses == 1 && clobber(l)) {
@ -8281,10 +8275,8 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
v.copyOf(v0)
var _auxint ValAndOff = makeValAndOff32(int32(c), off)
v0.AuxInt = ValAndOffToAuxInt(_auxint)
var _aux Sym = sym
v0.Aux = _aux
v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
@ -21179,19 +21171,19 @@ func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
v_0 := v.Args[0]
// match: (SBBQ x (MOVQconst [c]) borrow)
// cond: is32Bit(c)
// result: (SBBQconst x [c] borrow)
// result: (SBBQconst x [int32(c)] borrow)
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
c := v_1.AuxInt
c := auxIntToInt64(v_1.AuxInt)
borrow := v_2
if !(is32Bit(c)) {
break
}
v.reset(OpAMD64SBBQconst)
v.AuxInt = c
v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg2(x, borrow)
return true
}
@ -21269,13 +21261,13 @@ func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
// match: (SBBQconst x [c] (FlagEQ))
// result: (SUBQconstborrow x [c])
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
x := v_0
if v_1.Op != OpAMD64FlagEQ {
break
}
v.reset(OpAMD64SUBQconstborrow)
v.AuxInt = c
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
@ -26563,18 +26555,18 @@ func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
v_0 := v.Args[0]
// match: (SUBQborrow x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (SUBQconstborrow x [c])
// result: (SUBQconstborrow x [int32(c)])
for {
x := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
c := v_1.AuxInt
c := auxIntToInt64(v_1.AuxInt)
if !(is32Bit(c)) {
break
}
v.reset(OpAMD64SUBQconstborrow)
v.AuxInt = c
v.AuxInt = int32ToAuxInt(int32(c))
v.AddArg(x)
return true
}
@ -28502,7 +28494,7 @@ func rewriteValueAMD64_OpBitLen16(v *Value) bool {
x := v_0
v.reset(OpAMD64BSRL)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
v0.AuxInt = 1
v0.AuxInt = int32ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
@ -28523,7 +28515,7 @@ func rewriteValueAMD64_OpBitLen32(v *Value) bool {
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
v1.AuxInt = 1
v1.AuxInt = int32ToAuxInt(1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
v2.AddArg(x)
v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
@ -28544,14 +28536,14 @@ func rewriteValueAMD64_OpBitLen64(v *Value) bool {
t := v.Type
x := v_0
v.reset(OpAMD64ADDQconst)
v.AuxInt = 1
v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
v1 := b.NewValue0(v.Pos, OpSelect0, t)
v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v2.AddArg(x)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
v3.AuxInt = -1
v3.AuxInt = int64ToAuxInt(-1)
v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v5.AddArg(x)
@ -28571,7 +28563,7 @@ func rewriteValueAMD64_OpBitLen8(v *Value) bool {
x := v_0
v.reset(OpAMD64BSRL)
v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
v0.AuxInt = 1
v0.AuxInt = int32ToAuxInt(1)
v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
v1.AddArg(x)
v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
@ -28588,7 +28580,7 @@ func rewriteValueAMD64_OpCeil(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64ROUNDSD)
v.AuxInt = 2
v.AuxInt = int8ToAuxInt(2)
v.AddArg(x)
return true
}
@ -29475,7 +29467,7 @@ func rewriteValueAMD64_OpCtz16(v *Value) bool {
x := v_0
v.reset(OpAMD64BSFL)
v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
v0.AuxInt = 16
v0.AuxInt = int8ToAuxInt(16)
v0.AddArg(x)
v.AddArg(v0)
return true
@ -29492,7 +29484,7 @@ func rewriteValueAMD64_OpCtz32(v *Value) bool {
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
v1.AuxInt = 32
v1.AuxInt = int8ToAuxInt(32)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
@ -29514,7 +29506,7 @@ func rewriteValueAMD64_OpCtz64(v *Value) bool {
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
v2.AuxInt = 64
v2.AuxInt = int64ToAuxInt(64)
v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
v4.AddArg(x)
@ -29548,7 +29540,7 @@ func rewriteValueAMD64_OpCtz8(v *Value) bool {
x := v_0
v.reset(OpAMD64BSFL)
v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
v0.AuxInt = 8
v0.AuxInt = int8ToAuxInt(8)
v0.AddArg(x)
v.AddArg(v0)
return true
@ -29562,12 +29554,12 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool {
// match: (Div16 [a] x y)
// result: (Select0 (DIVW [a] x y))
for {
a := v.AuxInt
a := auxIntToBool(v.AuxInt)
x := v_0
y := v_1
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AuxInt = a
v0.AuxInt = boolToAuxInt(a)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@ -29598,12 +29590,12 @@ func rewriteValueAMD64_OpDiv32(v *Value) bool {
// match: (Div32 [a] x y)
// result: (Select0 (DIVL [a] x y))
for {
a := v.AuxInt
a := auxIntToBool(v.AuxInt)
x := v_0
y := v_1
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AuxInt = a
v0.AuxInt = boolToAuxInt(a)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@ -29634,12 +29626,12 @@ func rewriteValueAMD64_OpDiv64(v *Value) bool {
// match: (Div64 [a] x y)
// result: (Select0 (DIVQ [a] x y))
for {
a := v.AuxInt
a := auxIntToBool(v.AuxInt)
x := v_0
y := v_1
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AuxInt = a
v0.AuxInt = boolToAuxInt(a)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@ -29854,7 +29846,7 @@ func rewriteValueAMD64_OpFloor(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64ROUNDSD)
v.AuxInt = 1
v.AuxInt = int8ToAuxInt(1)
v.AddArg(x)
return true
}
@ -30992,12 +30984,12 @@ func rewriteValueAMD64_OpMod16(v *Value) bool {
// match: (Mod16 [a] x y)
// result: (Select1 (DIVW [a] x y))
for {
a := v.AuxInt
a := auxIntToBool(v.AuxInt)
x := v_0
y := v_1
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
v0.AuxInt = a
v0.AuxInt = boolToAuxInt(a)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@ -31028,12 +31020,12 @@ func rewriteValueAMD64_OpMod32(v *Value) bool {
// match: (Mod32 [a] x y)
// result: (Select1 (DIVL [a] x y))
for {
a := v.AuxInt
a := auxIntToBool(v.AuxInt)
x := v_0
y := v_1
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
v0.AuxInt = a
v0.AuxInt = boolToAuxInt(a)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@ -31064,12 +31056,12 @@ func rewriteValueAMD64_OpMod64(v *Value) bool {
// match: (Mod64 [a] x y)
// result: (Select1 (DIVQ [a] x y))
for {
a := v.AuxInt
a := auxIntToBool(v.AuxInt)
x := v_0
y := v_1
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
v0.AuxInt = a
v0.AuxInt = boolToAuxInt(a)
v0.AddArg2(x, y)
v.AddArg(v0)
return true
@ -31626,12 +31618,12 @@ func rewriteValueAMD64_OpNeg32F(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neg32F x)
// result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
for {
x := v_0
v.reset(OpAMD64PXOR)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
v.AddArg2(x, v0)
return true
}
@ -31641,12 +31633,12 @@ func rewriteValueAMD64_OpNeg64F(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
// match: (Neg64F x)
// result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
for {
x := v_0
v.reset(OpAMD64PXOR)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
v.AddArg2(x, v0)
return true
}
@ -31786,7 +31778,7 @@ func rewriteValueAMD64_OpNot(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64XORLconst)
v.AuxInt = 1
v.AuxInt = int32ToAuxInt(1)
v.AddArg(x)
return true
}
@ -31797,26 +31789,26 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool {
typ := &b.Func.Config.Types
// match: (OffPtr [off] ptr)
// cond: is32Bit(off)
// result: (ADDQconst [off] ptr)
// result: (ADDQconst [int32(off)] ptr)
for {
off := v.AuxInt
off := auxIntToInt64(v.AuxInt)
ptr := v_0
if !(is32Bit(off)) {
break
}
v.reset(OpAMD64ADDQconst)
v.AuxInt = off
v.AuxInt = int32ToAuxInt(int32(off))
v.AddArg(ptr)
return true
}
// match: (OffPtr [off] ptr)
// result: (ADDQ (MOVQconst [off]) ptr)
for {
off := v.AuxInt
off := auxIntToInt64(v.AuxInt)
ptr := v_0
v.reset(OpAMD64ADDQ)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
v0.AuxInt = off
v0.AuxInt = int64ToAuxInt(off)
v.AddArg2(v0, ptr)
return true
}
@ -31912,7 +31904,7 @@ func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64ROUNDSD)
v.AuxInt = 0
v.AuxInt = int8ToAuxInt(0)
v.AddArg(x)
return true
}
@ -33442,7 +33434,7 @@ func rewriteValueAMD64_OpSelect1(v *Value) bool {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpAMD64MOVQconst || v_0_0.AuxInt != 0 {
if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
break
}
v.reset(OpAMD64FlagEQ)
@ -33647,7 +33639,7 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool {
for {
x := v_0
v.reset(OpAMD64ROUNDSD)
v.AuxInt = 3
v.AuxInt = int8ToAuxInt(3)
v.AddArg(x)
return true
}

File diff suppressed because it is too large Load Diff