Mirror of https://github.com/golang/go (synced 2024-11-22 14:15:05 -07:00)
[dev.ssa] cmd/compile: port SSA backend to amd64p32
It's not a new backend, just a PtrSize==4 modification of the existing AMD64 backend.

Change-Id: Icc63521a5cf4ebb379f7430ef3f070894c09afda
Reviewed-on: https://go-review.googlesource.com/25586
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
commit 69a755b602
parent f3b4e78516
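Illustrative note (not part of the commit): the port keeps the AMD64 lowering and simply branches on the configured pointer size, so pointer operations pick 32-bit (L) or 64-bit (Q) instructions. A minimal Go sketch of that selection follows; the type and function names here are made up for illustration and are not the compiler's real API, only the PtrSize idea mirrors the change.

package main

import "fmt"

// config stands in for the SSA backend's Config; its PtrSize is 8 on amd64
// and 4 on amd64p32.
type config struct {
	PtrSize int64
}

// lowerAddPtr mirrors the guarded rewrite-rule pair added in this change:
//   (AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
//   (AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
func lowerAddPtr(c config) string {
	if c.PtrSize == 8 {
		return "ADDQ" // 64-bit pointer add on amd64
	}
	return "ADDL" // 32-bit pointer add on amd64p32
}

func main() {
	fmt.Println("amd64:   ", lowerAddPtr(config{PtrSize: 8}))
	fmt.Println("amd64p32:", lowerAddPtr(config{PtrSize: 4}))
}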
@@ -498,8 +498,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 gc.AddAux(&p.From, v)
 p.To.Type = obj.TYPE_REG
 p.To.Reg = gc.SSARegNum(v)
-case ssa.OpAMD64LEAQ:
-p := gc.Prog(x86.ALEAQ)
+case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL:
+p := gc.Prog(v.Op.Asm())
 p.From.Type = obj.TYPE_MEM
 p.From.Reg = gc.SSARegNum(v.Args[0])
 gc.AddAux(&p.From, v)
@@ -703,7 +703,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
 p.To.Offset = v.AuxInt

-case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
+case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
 if v.Type.IsMemory() {
 return
 }
@@ -37,7 +37,7 @@ func shouldssa(fn *Node) bool {
 if os.Getenv("SSATEST") == "" {
 return false
 }
-case "amd64", "arm":
+case "amd64", "amd64p32", "arm":
 // Generally available.
 }
 if !ssaEnabled {
@@ -1657,7 +1657,7 @@ func (s *state) expr(n *Node) *ssa.Value {

 if ft.IsFloat() || tt.IsFloat() {
 conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
-if s.config.IntSize == 4 {
+if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" {
 if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
 conv = conv1
 }
@@ -2998,6 +2998,10 @@ func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Val
 off += size
 }
 off = Rnd(off, int64(Widthptr))
+if Thearch.LinkArch.Name == "amd64p32" {
+// amd64p32 wants 8-byte alignment of the start of the return values.
+off = Rnd(off, 8)
+}

 // Issue call
 call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
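A worked example of the rounding added in the hunk above (a sketch only; rnd below is a local stand-in for the compiler's Rnd helper, and the offsets are hypothetical): on amd64p32 Widthptr is 4, so an argument area ending at offset 20 is already pointer-aligned, but the extra rounding bumps the start of the return values to 24.

package main

import "fmt"

// rnd rounds o up to the next multiple of r.
func rnd(o, r int64) int64 {
	if o%r != 0 {
		o += r - o%r
	}
	return o
}

func main() {
	const widthptr = 4 // pointer width on amd64p32
	off := int64(20)   // hypothetical end of the argument area

	off = rnd(off, widthptr) // 20: already 4-byte aligned
	off = rnd(off, 8)        // 24: amd64p32 wants the return values 8-byte aligned
	fmt.Println("return values start at offset", off)
}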
@@ -138,6 +138,17 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
 c.fpRegMask = fpRegMaskAMD64
 c.FPReg = framepointerRegAMD64
 c.hasGReg = false
+case "amd64p32":
+c.IntSize = 4
+c.PtrSize = 4
+c.lowerBlock = rewriteBlockAMD64
+c.lowerValue = rewriteValueAMD64
+c.registers = registersAMD64[:]
+c.gpRegMask = gpRegMaskAMD64
+c.fpRegMask = fpRegMaskAMD64
+c.FPReg = framepointerRegAMD64
+c.hasGReg = false
+c.noDuffDevice = true
 case "386":
 c.IntSize = 4
 c.PtrSize = 4
@@ -108,6 +108,10 @@ func decomposeBuiltIn(f *Func) {
 func decomposeBuiltInPhi(v *Value) {
 switch {
 case v.Type.IsInteger() && v.Type.Size() == 8 && v.Block.Func.Config.IntSize == 4:
+if v.Block.Func.Config.arch == "amd64p32" {
+// Even though ints are 32 bits, we have 64-bit ops.
+break
+}
 decomposeInt64Phi(v)
 case v.Type.IsComplex():
 decomposeComplexPhi(v)
@@ -4,7 +4,8 @@

 // Lowering arithmetic
 (Add64 x y) -> (ADDQ x y)
-(AddPtr x y) -> (ADDQ x y)
+(AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y)
+(AddPtr x y) && config.PtrSize == 4 -> (ADDL x y)
 (Add32 x y) -> (ADDL x y)
 (Add16 x y) -> (ADDL x y)
 (Add8 x y) -> (ADDL x y)
@@ -12,7 +13,8 @@
 (Add64F x y) -> (ADDSD x y)

 (Sub64 x y) -> (SUBQ x y)
-(SubPtr x y) -> (SUBQ x y)
+(SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y)
+(SubPtr x y) && config.PtrSize == 4 -> (SUBL x y)
 (Sub32 x y) -> (SUBL x y)
 (Sub16 x y) -> (SUBL x y)
 (Sub8 x y) -> (SUBL x y)
@@ -91,8 +93,9 @@
 (Not x) -> (XORLconst [1] x)

 // Lowering pointer arithmetic
-(OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr)
-(OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr)
+(OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr)
+(OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr)
+(OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr)

 // Lowering other arithmetic
 // TODO: CMPQconst 0 below is redundant because BSF sets Z but how to remove?
@@ -270,7 +273,8 @@
 (Eq16 x y) -> (SETEQ (CMPW x y))
 (Eq8 x y) -> (SETEQ (CMPB x y))
 (EqB x y) -> (SETEQ (CMPB x y))
-(EqPtr x y) -> (SETEQ (CMPQ x y))
+(EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y))
+(EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y))
 (Eq64F x y) -> (SETEQF (UCOMISD x y))
 (Eq32F x y) -> (SETEQF (UCOMISS x y))

@@ -279,13 +283,16 @@
 (Neq16 x y) -> (SETNE (CMPW x y))
 (Neq8 x y) -> (SETNE (CMPB x y))
 (NeqB x y) -> (SETNE (CMPB x y))
-(NeqPtr x y) -> (SETNE (CMPQ x y))
+(NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y))
+(NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y))
 (Neq64F x y) -> (SETNEF (UCOMISD x y))
 (Neq32F x y) -> (SETNEF (UCOMISS x y))

+(Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32
+
 // Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem)
 (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
 (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
 (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
@@ -328,14 +335,14 @@
 (Move [s] dst src mem)
 && SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8 ->
 (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
-(ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
-(ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
+(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
+(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
 (MOVQstore dst (MOVQload src mem) mem))
 (Move [s] dst src mem)
 && SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8 ->
 (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]
-(ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])
-(ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])
+(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16])
+(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16])
 (MOVOstore dst (MOVOload src mem) mem))

 // Medium copying uses a duff device.
@@ -376,7 +383,7 @@

 // Strip off any fractional word zeroing.
 (Zero [s] destptr mem) && SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8 ->
-(Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (ADDQconst destptr [SizeAndAlign(s).Size()%8])
+(Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8])
 (MOVQstoreconst [0] destptr mem))

 // Zero small numbers of words directly.
@@ -397,7 +404,7 @@
 (Zero [s] destptr mem)
 && SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0
 && !config.noDuffDevice ->
-(Zero [SizeAndAlign(s).Size()-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
+(Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
 (Zero [s] destptr mem)
 && SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice ->
 (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
@@ -415,7 +422,8 @@
 (Const64 [val]) -> (MOVQconst [val])
 (Const32F [val]) -> (MOVSSconst [val])
 (Const64F [val]) -> (MOVSDconst [val])
-(ConstNil) -> (MOVQconst [0])
+(ConstNil) && config.PtrSize == 8 -> (MOVQconst [0])
+(ConstNil) && config.PtrSize == 4 -> (MOVLconst [0])
 (ConstBool [b]) -> (MOVLconst [b])

 // Lowering calls
@@ -426,14 +434,17 @@
 (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

 // Miscellaneous
-(Convert <t> x mem) -> (MOVQconvert <t> x mem)
-(IsNonNil p) -> (SETNE (TESTQ p p))
+(Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem)
+(Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem)
+(IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p))
+(IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p))
 (IsInBounds idx len) -> (SETB (CMPQ idx len))
 (IsSliceInBounds idx len) -> (SETBE (CMPQ idx len))
 (NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
 (GetG mem) -> (LoweredGetG mem)
 (GetClosurePtr) -> (LoweredGetClosurePtr)
-(Addr {sym} base) -> (LEAQ {sym} base)
+(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
+(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)

 // block rewrites
 (If (SETL cmp) yes no) -> (LT cmp yes no)
@@ -1592,3 +1603,53 @@
 && x.Uses == 1
 && clobber(x)
 -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
+
+// amd64p32 rules
+// same as the rules above, but with 32 instead of 64 bit pointer arithmetic.
+// LEAQ,ADDQ -> LEAL,ADDL
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x)
+
+(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) ->
+(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) ->
+(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+(MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+(MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
+(MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem)
+(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
+(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem)
+(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+(MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+(MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+(MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) ->
+(MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
@@ -354,13 +354,15 @@ func init() {

 {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.

-{name: "LEAQ", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
-{name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + arg1 + auxint + aux
-{name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux
-{name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux
-{name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux
+{name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
+{name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + arg1 + auxint + aux
+{name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux
+{name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux
+{name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux
 // Note: LEAQ{1,2,4,8} must not have OpSB as either argument.

+{name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux
+
 // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
 {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
 {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff"}, // ditto, sign extend to int64
@@ -499,6 +501,7 @@ func init() {
 // gets correctly ordered with respect to GC safepoints.
 // arg0=ptr/int arg1=mem, output=int/ptr
 {name: "MOVQconvert", argLength: 2, reg: gp11, asm: "MOVQ"},
+{name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL"}, // amd64p32 equivalent

 // Constant flag values. For any comparison, there are 5 possible
 // outcomes: the three from the signed total order (<,==,>) and the
@@ -510,6 +510,7 @@ const (
 OpAMD64LEAQ2
 OpAMD64LEAQ4
 OpAMD64LEAQ8
+OpAMD64LEAL
 OpAMD64MOVBload
 OpAMD64MOVBQSXload
 OpAMD64MOVWload
@@ -563,6 +564,7 @@ const (
 OpAMD64LoweredGetClosurePtr
 OpAMD64LoweredNilCheck
 OpAMD64MOVQconvert
+OpAMD64MOVLconvert
 OpAMD64FlagEQ
 OpAMD64FlagLT_ULT
 OpAMD64FlagLT_UGT
@@ -5926,6 +5928,7 @@ var opcodeTable = [...]opInfo{
 auxType: auxSymOff,
 argLen: 1,
 rematerializeable: true,
+asm: x86.ALEAQ,
 reg: regInfo{
 inputs: []inputInfo{
 {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
@@ -5991,6 +5994,21 @@ var opcodeTable = [...]opInfo{
 },
 },
 },
+{
+name: "LEAL",
+auxType: auxSymOff,
+argLen: 1,
+rematerializeable: true,
+asm: x86.ALEAL,
+reg: regInfo{
+inputs: []inputInfo{
+{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+},
+outputs: []outputInfo{
+{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+},
+},
+},
 {
 name: "MOVBload",
 auxType: auxSymOff,
@@ -6646,6 +6664,19 @@ var opcodeTable = [...]opInfo{
 },
 },
 },
+{
+name: "MOVLconvert",
+argLen: 2,
+asm: x86.AMOVL,
+reg: regInfo{
+inputs: []inputInfo{
+{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+},
+outputs: []outputInfo{
+{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+},
+},
+},
 {
 name: "FlagEQ",
 argLen: 0,
@@ -11,7 +11,7 @@ func opt(f *Func) {

 func dec(f *Func) {
 applyRewrite(f, rewriteBlockdec, rewriteValuedec)
-if f.Config.IntSize == 4 {
+if f.Config.IntSize == 4 && f.Config.arch != "amd64p32" {
 applyRewrite(f, rewriteBlockdec64, rewriteValuedec64)
 }
 }
@@ -492,8 +492,14 @@ func (s *regAllocState) init(f *Func) {
 s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch)
 }
 }
-if s.f.Config.nacl && s.f.Config.arch == "arm" {
-s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
+if s.f.Config.nacl {
+switch s.f.Config.arch {
+case "arm":
+s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
+case "amd64p32":
+s.allocatable &^= 1 << 5 // BP - reserved for nacl
+s.allocatable &^= 1 << 15 // R15 - reserved for nacl
+}
 }

 s.regs = make([]regState, s.numRegs)
(File diff suppressed because it is too large.)
@@ -1,4 +1,4 @@
-// +build !amd64,!arm
+// +build !amd64,!arm,!amd64p32
 // errorcheck -0 -l -live -wb=0

 // Copyright 2014 The Go Authors. All rights reserved.
@@ -1,4 +1,4 @@
-// +build amd64 arm
+// +build amd64 arm amd64p32
 // errorcheck -0 -l -live -wb=0

 // Copyright 2014 The Go Authors. All rights reserved.
@@ -2,7 +2,7 @@
 // Fails on ppc64x because of incomplete optimization.
 // See issues 9058.
 // Same reason for mips64x and s390x.
-// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm
+// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm,!amd64p32

 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -1,5 +1,5 @@
 // errorcheck -0 -d=nil
-// +build amd64 arm
+// +build amd64 arm amd64p32

 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -1,4 +1,4 @@
-// +build !amd64,!arm
+// +build !amd64,!arm,!amd64p32
 // errorcheck -0 -d=append,slice

 // Copyright 2015 The Go Authors. All rights reserved.