
cmd/compile: remove amd64p32 rules

And simplify the remaining rules.

Updates #30439

Change-Id: Ib89dce16b17ae881824178346ed6ab895b79627e
Reviewed-on: https://go-review.googlesource.com/c/go/+/204600
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Josh Bleecher Snyder 2019-10-31 15:33:25 -07:00
parent 15bff20829
commit ce49f9506a
2 changed files with 22 additions and 314 deletions
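
Why this is safe: amd64p32 (the NaCl port's GOARCH, see #30439) was the only amd64 variant with 4-byte pointers. With it gone, config.PtrSize is always 8 when these AMD64 rules run, so every config.PtrSize == 4 arm below is dead code and every config.PtrSize == 8 guard is vacuously true. A small standalone illustration of the underlying fact (ordinary user code, not part of this CL):

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

func main() {
	// With the amd64p32 port gone, GOARCH=amd64 always has 8-byte
	// pointers, so a rule guard like config.PtrSize == 4 can never fire.
	fmt.Println(runtime.GOARCH, unsafe.Sizeof(uintptr(0))) // amd64 8
}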

src/cmd/compile/internal/ssa/gen/AMD64.rules

@@ -4,13 +4,11 @@
 
 // Lowering arithmetic
 (Add(64|32|16|8) x y) -> (ADD(Q|L|L|L) x y)
-(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
-(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
+(AddPtr x y) -> (ADDQ x y)
 (Add(32|64)F x y) -> (ADDS(S|D) x y)
 
 (Sub(64|32|16|8) x y) -> (SUB(Q|L|L|L) x y)
-(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
-(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
+(SubPtr x y) -> (SUBQ x y)
 (Sub(32|64)F x y) -> (SUBS(S|D) x y)
 
 (Mul(64|32|16|8) x y) -> (MUL(Q|L|L|L) x y)
@@ -76,9 +74,8 @@
 (Not x) -> (XORLconst [1] x)
 
 // Lowering pointer arithmetic
-(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
-(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
-(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)
+(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
 
 // Lowering other arithmetic
 (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
@@ -217,18 +214,16 @@
 (Geq(32|64)F x y) -> (SETGEF (UCOMIS(S|D) x y))
 
 (Eq(64|32|16|8|B) x y) -> (SETEQ (CMP(Q|L|W|B|B) x y))
-(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
-(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
+(EqPtr x y) -> (SETEQ (CMPQ x y))
 (Eq(32|64)F x y) -> (SETEQF (UCOMIS(S|D) x y))
 
 (Neq(64|32|16|8|B) x y) -> (SETNE (CMP(Q|L|W|B|B) x y))
-(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
-(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
+(NeqPtr x y) -> (SETNE (CMPQ x y))
 (Neq(32|64)F x y) -> (SETNEF (UCOMIS(S|D) x y))
 
 // Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
-(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
 (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
 (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
 (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
@@ -420,8 +415,7 @@
 (Const64 [val]) -> (MOVQconst [val])
 (Const32F [val]) -> (MOVSSconst [val])
 (Const64F [val]) -> (MOVSDconst [val])
-(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
-(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
+(ConstNil) -> (MOVQconst [0])
 (ConstBool [b]) -> (MOVLconst [b])
 
 // Lowering calls
@@ -476,21 +470,16 @@
 (CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) -> y
 
 // Miscellaneous
-(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
-(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
-(IsInBounds idx len) && config.PtrSize == 8 -> (SETB (CMPQ idx len))
-(IsInBounds idx len) && config.PtrSize == 4 -> (SETB (CMPL idx len))
-(IsSliceInBounds idx len) && config.PtrSize == 8 -> (SETBE (CMPQ idx len))
-(IsSliceInBounds idx len) && config.PtrSize == 4 -> (SETBE (CMPL idx len))
+(IsNonNil p) -> (SETNE (TESTQ p p))
+(IsInBounds idx len) -> (SETB (CMPQ idx len))
+(IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
 (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
 (GetG mem) -> (LoweredGetG mem)
 (GetClosurePtr) -> (LoweredGetClosurePtr)
 (GetCallerPC) -> (LoweredGetCallerPC)
 (GetCallerSP) -> (LoweredGetCallerSP)
-(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
-(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
-(LocalAddr {sym} base _) && config.PtrSize == 8 -> (LEAQ {sym} base)
-(LocalAddr {sym} base _) && config.PtrSize == 4 -> (LEAL {sym} base)
+(Addr {sym} base) -> (LEAQ {sym} base)
+(LocalAddr {sym} base _) -> (LEAQ {sym} base)
 
 (MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLstore [off] {sym} ptr x mem)
 (MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEstore [off] {sym} ptr x mem)
@@ -528,16 +517,14 @@
 (AtomicLoad8 ptr mem) -> (MOVBatomicload ptr mem)
 (AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
 (AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
-(AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
-(AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem)
+(AtomicLoadPtr ptr mem) -> (MOVQatomicload ptr mem)
 
 // Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
 // TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
 (AtomicStore8 ptr val mem) -> (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
 (AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
 (AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
 
 // Atomic exchanges.
 (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
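
The rewriteAMD64.go hunks below are mechanical fallout: that file is generated from the rules above, so removing a rule arm deletes its entire match block from the generated matcher, and guards that became vacuous disappear. A simplified sketch of the resulting matcher shape (the stub Value type is for illustration only; the real code operates on the compiler's *ssa.Value and Op constants):

package main

import "fmt"

// Value is a stub standing in for the compiler's SSA value.
type Value struct {
	Op   string
	Args []*Value
}

// rewriteAddPtr mirrors the simplified generated matcher: with the
// PtrSize guard gone, (AddPtr x y) lowers to (ADDQ x y) unconditionally,
// so the function needs neither a config lookup nor a failure path.
func rewriteAddPtr(v *Value) bool {
	// match: (AddPtr x y)
	// result: (ADDQ x y)
	x, y := v.Args[0], v.Args[1]
	v.Op = "ADDQ"
	v.Args = []*Value{x, y}
	return true
}

func main() {
	v := &Value{Op: "AddPtr", Args: []*Value{{}, {}}}
	rewriteAddPtr(v)
	fmt.Println(v.Op) // ADDQ
}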

src/cmd/compile/internal/ssa/rewriteAMD64.go

@@ -50037,70 +50037,28 @@ func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (AddPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (ADDQ x y)
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64ADDQ)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
 	}
-	// match: (AddPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (ADDL x y)
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAddr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (Addr {sym} base)
-	// cond: config.PtrSize == 8
 	// result: (LEAQ {sym} base)
 	for {
 		sym := v.Aux
 		base := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64LEAQ)
 		v.Aux = sym
 		v.AddArg(base)
 		return true
 	}
-	// match: (Addr {sym} base)
-	// cond: config.PtrSize == 4
-	// result: (LEAL {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
 	// match: (And16 x y)
@@ -50311,37 +50269,16 @@ func rewriteValueAMD64_OpAtomicLoad8_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (AtomicLoadPtr ptr mem)
-	// cond: config.PtrSize == 8
 	// result: (MOVQatomicload ptr mem)
 	for {
 		mem := v.Args[1]
 		ptr := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64MOVQatomicload)
 		v.AddArg(ptr)
 		v.AddArg(mem)
 		return true
 	}
-	// match: (AtomicLoadPtr ptr mem)
-	// cond: config.PtrSize == 4
-	// result: (MOVLatomicload ptr mem)
-	for {
-		mem := v.Args[1]
-		ptr := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
 	// match: (AtomicOr8 ptr val mem)
@@ -50413,18 +50350,13 @@ func rewriteValueAMD64_OpAtomicStore8_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	typ := &b.Func.Config.Types
 	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond: config.PtrSize == 8
 	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
 	for {
 		mem := v.Args[2]
 		ptr := v.Args[0]
 		val := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpSelect1)
 		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
 		v0.AddArg(val)
@@ -50433,25 +50365,6 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond: config.PtrSize == 4
-	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
-	for {
-		mem := v.Args[2]
-		ptr := v.Args[0]
-		val := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
 	// match: (Avg64u x y)
@@ -51774,31 +51687,13 @@ func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (ConstNil)
-	// cond: config.PtrSize == 8
 	// result: (MOVQconst [0])
 	for {
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64MOVQconst)
 		v.AuxInt = 0
 		return true
 	}
-	// match: (ConstNil)
-	// cond: config.PtrSize == 4
-	// result: (MOVLconst [0])
-	for {
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpCtz16_0(v *Value) bool {
 	b := v.Block
@@ -52303,16 +52198,11 @@ func rewriteValueAMD64_OpEqB_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (EqPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (SETEQ (CMPQ x y))
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETEQ)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(x)
@@ -52320,23 +52210,6 @@ func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (EqPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SETEQ (CMPL x y))
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpFloor_0(v *Value) bool {
 	// match: (Floor x)
@@ -52761,16 +52634,11 @@ func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (IsInBounds idx len)
-	// cond: config.PtrSize == 8
 	// result: (SETB (CMPQ idx len))
 	for {
 		len := v.Args[1]
 		idx := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETB)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(idx)
@@ -52778,35 +52646,13 @@ func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (IsInBounds idx len)
-	// cond: config.PtrSize == 4
-	// result: (SETB (CMPL idx len))
-	for {
-		len := v.Args[1]
-		idx := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (IsNonNil p)
-	// cond: config.PtrSize == 8
 	// result: (SETNE (TESTQ p p))
 	for {
 		p := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETNE)
 		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
 		v0.AddArg(p)
@@ -52814,35 +52660,14 @@ func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (IsNonNil p)
-	// cond: config.PtrSize == 4
-	// result: (SETNE (TESTL p p))
-	for {
-		p := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
-		v0.AddArg(p)
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (IsSliceInBounds idx len)
-	// cond: config.PtrSize == 8
 	// result: (SETBE (CMPQ idx len))
 	for {
 		len := v.Args[1]
 		idx := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETBE)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(idx)
@@ -52850,23 +52675,6 @@ func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (IsSliceInBounds idx len)
-	// cond: config.PtrSize == 4
-	// result: (SETBE (CMPL idx len))
-	for {
-		len := v.Args[1]
-		idx := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
 	b := v.Block
@@ -53169,16 +52977,14 @@ func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpLoad_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
+	// cond: (is64BitInt(t) || isPtr(t))
 	// result: (MOVQload ptr mem)
 	for {
 		t := v.Type
 		mem := v.Args[1]
 		ptr := v.Args[0]
-		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
+		if !(is64BitInt(t) || isPtr(t)) {
 			break
 		}
 		v.reset(OpAMD64MOVQload)
@@ -53187,13 +52993,13 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool {
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
+	// cond: is32BitInt(t)
 	// result: (MOVLload ptr mem)
 	for {
 		t := v.Type
 		mem := v.Args[1]
 		ptr := v.Args[0]
-		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
+		if !(is32BitInt(t)) {
 			break
 		}
 		v.reset(OpAMD64MOVLload)
@@ -53264,39 +53070,17 @@ func rewriteValueAMD64_OpLoad_0(v *Value) bool {
 	return false
 }
 func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (LocalAddr {sym} base _)
-	// cond: config.PtrSize == 8
 	// result: (LEAQ {sym} base)
 	for {
 		sym := v.Aux
 		_ = v.Args[1]
 		base := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64LEAQ)
 		v.Aux = sym
 		v.AddArg(base)
 		return true
 	}
-	// match: (LocalAddr {sym} base _)
-	// cond: config.PtrSize == 4
-	// result: (LEAL {sym} base)
-	for {
-		sym := v.Aux
-		_ = v.Args[1]
-		base := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
 	b := v.Block
@@ -54959,16 +54743,11 @@ func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	// match: (NeqPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (SETNE (CMPQ x y))
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SETNE)
 		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
 		v0.AddArg(x)
@@ -54976,23 +54755,6 @@ func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
-	// match: (NeqPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SETNE (CMPL x y))
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
 	// match: (NilCheck ptr mem)
@@ -55019,15 +54781,14 @@ func rewriteValueAMD64_OpNot_0(v *Value) bool {
 }
 func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 	b := v.Block
-	config := b.Func.Config
 	typ := &b.Func.Config.Types
 	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 8 && is32Bit(off)
+	// cond: is32Bit(off)
 	// result: (ADDQconst [off] ptr)
 	for {
 		off := v.AuxInt
 		ptr := v.Args[0]
-		if !(config.PtrSize == 8 && is32Bit(off)) {
+		if !(is32Bit(off)) {
 			break
 		}
 		v.reset(OpAMD64ADDQconst)
@@ -55036,14 +54797,10 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 		return true
 	}
 	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 8
 	// result: (ADDQ (MOVQconst [off]) ptr)
 	for {
 		off := v.AuxInt
 		ptr := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64ADDQ)
 		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
 		v0.AuxInt = off
@@ -55051,21 +54808,6 @@ func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
 		v.AddArg(ptr)
 		return true
 	}
-	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 4
-	// result: (ADDLconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpOr16_0(v *Value) bool {
 	// match: (Or16 x y)
@@ -57223,37 +56965,16 @@ func rewriteValueAMD64_OpSub8_0(v *Value) bool {
 	}
 }
 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
-	b := v.Block
-	config := b.Func.Config
 	// match: (SubPtr x y)
-	// cond: config.PtrSize == 8
 	// result: (SUBQ x y)
 	for {
 		y := v.Args[1]
 		x := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
 		v.reset(OpAMD64SUBQ)
 		v.AddArg(x)
 		v.AddArg(y)
 		return true
 	}
-	// match: (SubPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SUBL x y)
-	for {
-		y := v.Args[1]
-		x := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
 }
 func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
 	// match: (Trunc x)