
cmd/compile: convert last amd64 rules to typed aux

Passes

  gotip build -toolexec 'toolstash -cmp' -a std

Change-Id: I196d3bdef4a4b650534a4ddd3053e65e0846fdcc
Reviewed-on: https://go-review.googlesource.com/c/go/+/257898
Reviewed-by: Keith Randall <khr@golang.org>
Trust: Alberto Donizetti <alb.donizetti@gmail.com>
Alberto Donizetti <alb.donizetti@gmail.com>, 2020-09-27 19:13:24 +02:00
commit 39a426d356, parent 0ab72ed020
2 changed files with 173 additions and 158 deletions
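The conversion is mechanical: each remaining untyped rule (the old -> arrow) becomes a typed rule (=>), and the generated matcher then reads and writes Value.AuxInt and Value.Aux through typed accessors instead of touching the raw fields. A minimal sketch of the shape of those accessors, assuming the definitions in the ssa package's rewrite.go (only helper names that actually appear in the generated code below are used):

	package ssa // sketch only: the real helpers live in cmd/compile/internal/ssa/rewrite.go

	// Value.AuxInt is stored as a raw int64; typed-aux rules convert at the boundary.
	func auxIntToInt32(i int64) int32 { return int32(i) } // read a 32-bit AuxInt
	func auxIntToInt64(i int64) int64 { return i }        // read a 64-bit AuxInt
	func int32ToAuxInt(i int32) int64 { return int64(i) } // store a 32-bit AuxInt
	func int64ToAuxInt(i int64) int64 { return i }        // store a 64-bit AuxInt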

src/cmd/compile/internal/ssa/gen/AMD64.rules

@@ -1278,9 +1278,12 @@
 (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT)
 // TESTQ c c sets flags like CMPQ c 0.
-(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c == 0 -> (FlagEQ)
-(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c < 0 -> (FlagLT_UGT)
-(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c > 0 -> (FlagGT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
+(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT)
 // TODO: DIVxU also.
@@ -2157,13 +2160,13 @@
 // If possible, put a rematerializeable value in the first argument slot,
 // to reduce the odds that another value will be have to spilled
 // specifically to free up AX.
-(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() -> (HMUL(Q|L) y x)
-(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() -> (HMUL(Q|L)U y x)
+(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x)
+(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
 // Fold loads into compares
 // Note: these may be undone by the flagalloc pass.
-(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) -> (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
-(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
+(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
 (CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
 	&& l.Uses == 1
@@ -2174,22 +2177,22 @@
 	&& clobber(l) =>
 	@l.Block (CMP(W|B)constload {sym} [makeValAndOff32(int32(c),off)] ptr mem)
-(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(c,off) -> (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
-(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,int64(off)) => (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
 (TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
 	&& l == l2
 	&& l.Uses == 2
-	&& validValAndOff(0,off)
-	&& clobber(l) ->
-	@l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem)
+	&& validValAndOff(0, int64(off))
+	&& clobber(l) =>
+	@l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
-(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))])
-(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVQload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) ->
-  (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))])
-    (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))]) mem))
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
+  (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
+    (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
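
The only rules above that are more than an arrow swap are the TESTQconst ones. With typed aux, TESTQconst carries an int32 AuxInt while MOVQconst carries an int64, so the old pattern (TESTQconst [c] (MOVQconst [c])), which reused c in two differently typed slots, can no longer type-check; the Q and L cases are split, and the Q case binds a second constant d and checks int64(c) == d in the condition. The flag results encode the comment's observation that TESTQ c,c sets flags like CMPQ c,0; a sketch of that classification in plain Go (the function name is invented for illustration):

	package ssa // sketch only

	// flagForSelfTest mirrors the three rewritten rules: TEST c, c ANDs the
	// constant with itself, so the flags are those of CMP c, $0.
	func flagForSelfTest(c int64) string {
		switch {
		case c == 0:
			return "FlagEQ" // c&c == 0, so ZF is set
		case c < 0:
			return "FlagLT_UGT" // negative signed, but huge unsigned
		default:
			return "FlagGT_UGT" // positive signed, nonzero unsigned
		}
	}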

src/cmd/compile/internal/ssa/rewriteAMD64.go

@@ -6767,8 +6767,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
 		if l.Op != OpAMD64MOVBload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		x := v_1
@@ -6776,8 +6776,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
 			break
 		}
 		v.reset(OpAMD64CMPBload)
-		v.AuxInt = off
-		v.Aux = sym
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
 		v.AddArg3(ptr, x, mem)
 		return true
 	}
@@ -6790,8 +6790,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
 		if l.Op != OpAMD64MOVBload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		if !(canMergeLoad(v, l) && clobber(l)) {
@@ -6799,8 +6799,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
 		}
 		v.reset(OpAMD64InvertFlags)
 		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
-		v0.AuxInt = off
-		v0.Aux = sym
+		v0.AuxInt = int32ToAuxInt(off)
+		v0.Aux = symToAux(sym)
 		v0.AddArg3(ptr, x, mem)
 		v.AddArg(v0)
 		return true
@@ -7076,23 +7076,23 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
 		return true
 	}
 	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
-	// cond: validValAndOff(int64(int8(c)),off)
-	// result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+	// cond: validValAndOff(int64(int8(c)),int64(off))
+	// result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		ptr := v_0
 		if v_1.Op != OpAMD64MOVLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		mem := v_2
-		if !(validValAndOff(int64(int8(c)), off)) {
+		if !(validValAndOff(int64(int8(c)), int64(off))) {
 			break
 		}
 		v.reset(OpAMD64CMPBconstload)
-		v.AuxInt = makeValAndOff(int64(int8(c)), off)
-		v.Aux = sym
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+		v.Aux = symToAux(sym)
 		v.AddArg2(ptr, mem)
 		return true
 	}
@@ -7153,8 +7153,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
 		if l.Op != OpAMD64MOVLload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		x := v_1
@@ -7162,8 +7162,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
 			break
 		}
 		v.reset(OpAMD64CMPLload)
-		v.AuxInt = off
-		v.Aux = sym
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
 		v.AddArg3(ptr, x, mem)
 		return true
 	}
@@ -7176,8 +7176,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
 		if l.Op != OpAMD64MOVLload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		if !(canMergeLoad(v, l) && clobber(l)) {
@@ -7185,8 +7185,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
 		}
 		v.reset(OpAMD64InvertFlags)
 		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
-		v0.AuxInt = off
-		v0.Aux = sym
+		v0.AuxInt = int32ToAuxInt(off)
+		v0.Aux = symToAux(sym)
 		v0.AddArg3(ptr, x, mem)
 		v.AddArg(v0)
 		return true
@@ -7477,23 +7477,23 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
 		return true
 	}
 	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
-	// cond: validValAndOff(c,off)
-	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+	// cond: validValAndOff(int64(c),int64(off))
+	// result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		ptr := v_0
 		if v_1.Op != OpAMD64MOVLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		mem := v_2
-		if !(validValAndOff(c, off)) {
+		if !(validValAndOff(int64(c), int64(off))) {
 			break
 		}
 		v.reset(OpAMD64CMPLconstload)
-		v.AuxInt = makeValAndOff(c, off)
-		v.Aux = sym
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+		v.Aux = symToAux(sym)
 		v.AddArg2(ptr, mem)
 		return true
 	}
@@ -7652,8 +7652,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
 		if l.Op != OpAMD64MOVQload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		x := v_1
@@ -7661,8 +7661,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
 			break
 		}
 		v.reset(OpAMD64CMPQload)
-		v.AuxInt = off
-		v.Aux = sym
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
 		v.AddArg3(ptr, x, mem)
 		return true
 	}
@@ -7675,8 +7675,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
 		if l.Op != OpAMD64MOVQload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		if !(canMergeLoad(v, l) && clobber(l)) {
@@ -7684,8 +7684,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
 		}
 		v.reset(OpAMD64InvertFlags)
 		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
-		v0.AuxInt = off
-		v0.Aux = sym
+		v0.AuxInt = int32ToAuxInt(off)
+		v0.Aux = symToAux(sym)
 		v0.AddArg3(ptr, x, mem)
 		v.AddArg(v0)
 		return true
@@ -8047,23 +8047,23 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
 		return true
 	}
 	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
-	// cond: validValAndOff(c,off)
-	// result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
+	// cond: validValAndOff(c,int64(off))
+	// result: (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		ptr := v_0
 		if v_1.Op != OpAMD64MOVQconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		mem := v_2
-		if !(validValAndOff(c, off)) {
+		if !(validValAndOff(c, int64(off))) {
 			break
 		}
 		v.reset(OpAMD64CMPQconstload)
-		v.AuxInt = makeValAndOff(c, off)
-		v.Aux = sym
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff64(c, int64(off)))
+		v.Aux = symToAux(sym)
 		v.AddArg2(ptr, mem)
 		return true
 	}
@@ -8124,8 +8124,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
 		if l.Op != OpAMD64MOVWload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		x := v_1
@@ -8133,8 +8133,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
 			break
 		}
 		v.reset(OpAMD64CMPWload)
-		v.AuxInt = off
-		v.Aux = sym
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
 		v.AddArg3(ptr, x, mem)
 		return true
 	}
@@ -8147,8 +8147,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
 		if l.Op != OpAMD64MOVWload {
 			break
 		}
-		off := l.AuxInt
-		sym := l.Aux
+		off := auxIntToInt32(l.AuxInt)
+		sym := auxToSym(l.Aux)
 		mem := l.Args[1]
 		ptr := l.Args[0]
 		if !(canMergeLoad(v, l) && clobber(l)) {
@@ -8156,8 +8156,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
 		}
 		v.reset(OpAMD64InvertFlags)
 		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
-		v0.AuxInt = off
-		v0.Aux = sym
+		v0.AuxInt = int32ToAuxInt(off)
+		v0.Aux = symToAux(sym)
 		v0.AddArg3(ptr, x, mem)
 		v.AddArg(v0)
 		return true
@@ -8433,23 +8433,23 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
 		return true
 	}
 	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
-	// cond: validValAndOff(int64(int16(c)),off)
-	// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
+	// cond: validValAndOff(int64(int16(c)),int64(off))
+	// result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		ptr := v_0
 		if v_1.Op != OpAMD64MOVLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		mem := v_2
-		if !(validValAndOff(int64(int16(c)), off)) {
+		if !(validValAndOff(int64(int16(c)), int64(off))) {
 			break
 		}
 		v.reset(OpAMD64CMPWconstload)
-		v.AuxInt = makeValAndOff(int64(int16(c)), off)
-		v.Aux = sym
+		v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+		v.Aux = symToAux(sym)
 		v.AddArg2(ptr, mem)
 		return true
 	}
@@ -10296,15 +10296,15 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
 	}
 	// match: (MOVBload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVLconst [int64(read8(sym, off))])
+	// result: (MOVLconst [int32(read8(sym, int64(off)))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int64(read8(sym, off))
+		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
 		return true
 	}
 	return false
@@ -12124,15 +12124,15 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
 	}
 	// match: (MOVLload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
+	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder))
+		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
 		return true
 	}
 	return false
@@ -13240,16 +13240,16 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
 	}
 	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
 	// cond: symIsRO(srcSym)
-	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))]) mem))
+	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
 	for {
-		dstOff := v.AuxInt
-		dstSym := v.Aux
+		dstOff := auxIntToInt32(v.AuxInt)
+		dstSym := auxToSym(v.Aux)
 		ptr := v_0
 		if v_1.Op != OpAMD64MOVOload {
 			break
 		}
-		srcOff := v_1.AuxInt
-		srcSym := v_1.Aux
+		srcOff := auxIntToInt32(v_1.AuxInt)
+		srcSym := auxToSym(v_1.Aux)
 		v_1_0 := v_1.Args[0]
 		if v_1_0.Op != OpSB {
 			break
@@ -13259,15 +13259,15 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
 			break
 		}
 		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = dstOff + 8
-		v.Aux = dstSym
+		v.AuxInt = int32ToAuxInt(dstOff + 8)
+		v.Aux = symToAux(dstSym)
 		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
-		v0.AuxInt = int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))
+		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
 		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
-		v1.AuxInt = dstOff
-		v1.Aux = dstSym
+		v1.AuxInt = int32ToAuxInt(dstOff)
+		v1.Aux = symToAux(dstSym)
 		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
-		v2.AuxInt = int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))
+		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
 		v1.AddArg3(ptr, v2, mem)
 		v.AddArg3(ptr, v0, v1)
 		return true
@@ -13504,15 +13504,15 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
 	}
 	// match: (MOVQload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVQconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = int64(read64(sym, off, config.ctxt.Arch.ByteOrder))
+		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
 		return true
 	}
 	return false
@@ -14953,15 +14953,15 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
 	}
 	// match: (MOVWload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
+		v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
 		return true
 	}
 	return false
@@ -27044,27 +27044,27 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
 		break
 	}
 	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
-	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
-	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem)
+	// cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+	// result: @l.Block (CMPBconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			l := v_0
 			if l.Op != OpAMD64MOVBload {
 				continue
 			}
-			off := l.AuxInt
-			sym := l.Aux
+			off := auxIntToInt32(l.AuxInt)
+			sym := auxToSym(l.Aux)
 			mem := l.Args[1]
 			ptr := l.Args[0]
 			l2 := v_1
-			if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+			if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
 				continue
 			}
 			b = l.Block
 			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
 			v.copyOf(v0)
-			v0.AuxInt = makeValAndOff(0, off)
-			v0.Aux = sym
+			v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+			v0.Aux = symToAux(sym)
 			v0.AddArg2(ptr, mem)
 			return true
 		}
@@ -27112,27 +27112,27 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
 		break
 	}
 	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
-	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
-	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem)
+	// cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+	// result: @l.Block (CMPLconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			l := v_0
 			if l.Op != OpAMD64MOVLload {
 				continue
 			}
-			off := l.AuxInt
-			sym := l.Aux
+			off := auxIntToInt32(l.AuxInt)
+			sym := auxToSym(l.Aux)
 			mem := l.Args[1]
 			ptr := l.Args[0]
 			l2 := v_1
-			if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+			if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
 				continue
 			}
 			b = l.Block
 			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
 			v.copyOf(v0)
-			v0.AuxInt = makeValAndOff(0, off)
-			v0.Aux = sym
+			v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+			v0.Aux = symToAux(sym)
 			v0.AddArg2(ptr, mem)
 			return true
 		}
@@ -27146,8 +27146,8 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
 	// cond: c == 0
 	// result: (FlagEQ)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c == 0) {
+		c := auxIntToInt32(v.AuxInt)
+		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
 			break
 		}
 		v.reset(OpAMD64FlagEQ)
@@ -27157,8 +27157,8 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
 	// cond: c < 0
 	// result: (FlagLT_UGT)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c < 0) {
+		c := auxIntToInt32(v.AuxInt)
+		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
 			break
 		}
 		v.reset(OpAMD64FlagLT_UGT)
@@ -27168,8 +27168,8 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
 	// cond: c > 0
 	// result: (FlagGT_UGT)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c > 0) {
+		c := auxIntToInt32(v.AuxInt)
+		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
 			break
 		}
 		v.reset(OpAMD64FlagGT_UGT)
@@ -27217,27 +27217,27 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
 		break
 	}
 	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
-	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
-	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem)
+	// cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+	// result: @l.Block (CMPQconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			l := v_0
 			if l.Op != OpAMD64MOVQload {
 				continue
 			}
-			off := l.AuxInt
-			sym := l.Aux
+			off := auxIntToInt32(l.AuxInt)
+			sym := auxToSym(l.Aux)
 			mem := l.Args[1]
 			ptr := l.Args[0]
 			l2 := v_1
-			if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+			if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
 				continue
 			}
 			b = l.Block
 			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
 			v.copyOf(v0)
-			v0.AuxInt = makeValAndOff(0, off)
-			v0.Aux = sym
+			v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+			v0.Aux = symToAux(sym)
 			v0.AddArg2(ptr, mem)
 			return true
 		}
@@ -27247,34 +27247,46 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
 }
 func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
 	v_0 := v.Args[0]
-	// match: (TESTQconst [c] (MOVQconst [c]))
-	// cond: c == 0
+	// match: (TESTQconst [c] (MOVQconst [d]))
+	// cond: int64(c) == d && c == 0
 	// result: (FlagEQ)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c == 0) {
+		c := auxIntToInt32(v.AuxInt)
+		if v_0.Op != OpAMD64MOVQconst {
+			break
+		}
+		d := auxIntToInt64(v_0.AuxInt)
+		if !(int64(c) == d && c == 0) {
 			break
 		}
 		v.reset(OpAMD64FlagEQ)
 		return true
 	}
-	// match: (TESTQconst [c] (MOVQconst [c]))
-	// cond: c < 0
+	// match: (TESTQconst [c] (MOVQconst [d]))
+	// cond: int64(c) == d && c < 0
 	// result: (FlagLT_UGT)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c < 0) {
+		c := auxIntToInt32(v.AuxInt)
+		if v_0.Op != OpAMD64MOVQconst {
+			break
+		}
+		d := auxIntToInt64(v_0.AuxInt)
+		if !(int64(c) == d && c < 0) {
 			break
 		}
 		v.reset(OpAMD64FlagLT_UGT)
 		return true
 	}
-	// match: (TESTQconst [c] (MOVQconst [c]))
-	// cond: c > 0
+	// match: (TESTQconst [c] (MOVQconst [d]))
+	// cond: int64(c) == d && c > 0
 	// result: (FlagGT_UGT)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c > 0) {
+		c := auxIntToInt32(v.AuxInt)
+		if v_0.Op != OpAMD64MOVQconst {
+			break
+		}
+		d := auxIntToInt64(v_0.AuxInt)
+		if !(int64(c) == d && c > 0) {
 			break
 		}
 		v.reset(OpAMD64FlagGT_UGT)
@@ -27318,27 +27330,27 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
 		break
 	}
 	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
-	// cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
-	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem)
+	// cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+	// result: @l.Block (CMPWconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			l := v_0
 			if l.Op != OpAMD64MOVWload {
 				continue
 			}
-			off := l.AuxInt
-			sym := l.Aux
+			off := auxIntToInt32(l.AuxInt)
+			sym := auxToSym(l.Aux)
 			mem := l.Args[1]
 			ptr := l.Args[0]
 			l2 := v_1
-			if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+			if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
 				continue
 			}
 			b = l.Block
 			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
 			v.copyOf(v0)
-			v0.AuxInt = makeValAndOff(0, off)
-			v0.Aux = sym
+			v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+			v0.Aux = symToAux(sym)
 			v0.AddArg2(ptr, mem)
 			return true
 		}
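
Several of the rewritten conditions funnel through ValAndOff, which packs a constant value and an offset into a single AuxInt; validValAndOff is what guards the makeValAndOff32/makeValAndOff64 calls above. A sketch of the packing, assuming the ssa package's 32/32-bit split (value in the high half, offset in the low half):

	package ssa // sketch only

	// ValAndOff packs a 32-bit value and a 32-bit offset into one int64 AuxInt.
	type ValAndOff int64

	func fitsInt32(x int64) bool { return x == int64(int32(x)) }

	// validValAndOff: both halves must fit in 32 bits before packing.
	func validValAndOff(val, off int64) bool { return fitsInt32(val) && fitsInt32(off) }

	func makeValAndOff64(val, off int64) ValAndOff {
		if !validValAndOff(val, off) {
			panic("invalid makeValAndOff64")
		}
		return ValAndOff(val<<32 + int64(uint32(off)))
	}

	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }  // high 32 bits
	func (x ValAndOff) Off() int64 { return int64(int32(x)) } // low 32 bits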