
cmd/compile: intrinsify atomics on MIPS64

Change-Id: Ica65b7a52af9558a05d0a0e1dff0f9ec838f4117
Reviewed-on: https://go-review.googlesource.com/68830
Run-TryBot: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Cherry Zhang 2017-10-05 10:11:38 -04:00
parent b80029ccf4
commit d63de28711
7 changed files with 832 additions and 14 deletions

src/cmd/compile/internal/gc/ssa.go

@@ -2676,40 +2676,40 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2717,14 +2717,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2732,14 +2732,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -2747,14 +2747,14 @@ func init() {
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
-sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
+sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {

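For context (not part of the diff): each addF entry replaces a call into runtime/internal/atomic with an inline instruction sequence during SSA construction, and the sync/atomic entry points are aliased to these intrinsics elsewhere in this file, so ordinary user code benefits too. A hedged example of code that now compiles to an inline SYNC; MOVV; SYNC load on GOARCH=mips64 instead of a function call:

// Hedged example, not part of the diff: the load below is intrinsified
// via the Load64 entry above when building for GOARCH=mips64.
package main

import (
	"fmt"
	"sync/atomic"
)

var v uint64

func main() {
	fmt.Println(atomic.LoadUint64(&v))
}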
src/cmd/compile/internal/mips64/ssa.go

@@ -483,6 +483,211 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
gc.Patch(p6, p2)
case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
s.Call(v)
case ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
as := mips.AMOVV
if v.Op == ssa.OpMIPS64LoweredAtomicLoad32 {
as = mips.AMOVW
}
s.Prog(mips.ASYNC)
p := s.Prog(as)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicStore32, ssa.OpMIPS64LoweredAtomicStore64:
as := mips.AMOVV
if v.Op == ssa.OpMIPS64LoweredAtomicStore32 {
as = mips.AMOVW
}
s.Prog(mips.ASYNC)
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicStorezero32, ssa.OpMIPS64LoweredAtomicStorezero64:
as := mips.AMOVV
if v.Op == ssa.OpMIPS64LoweredAtomicStorezero32 {
as = mips.AMOVW
}
s.Prog(mips.ASYNC)
p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicExchange32, ssa.OpMIPS64LoweredAtomicExchange64:
// SYNC
// MOVV Rarg1, Rtmp
// LL (Rarg0), Rout
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
ll := mips.ALLV
sc := mips.ASCV
if v.Op == ssa.OpMIPS64LoweredAtomicExchange32 {
ll = mips.ALL
sc = mips.ASC
}
s.Prog(mips.ASYNC)
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
p1 := s.Prog(ll)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = v.Reg0()
p2 := s.Prog(sc)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
// SYNC
// LL (Rarg0), Rout
// ADDV Rarg1, Rout, Rtmp
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
// ADDV Rarg1, Rout
ll := mips.ALLV
sc := mips.ASCV
if v.Op == ssa.OpMIPS64LoweredAtomicAdd32 {
ll = mips.ALL
sc = mips.ASC
}
s.Prog(mips.ASYNC)
p := s.Prog(ll)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p1 := s.Prog(mips.AADDVU)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg()
p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := s.Prog(sc)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = v.Reg0()
p4.To.Type = obj.TYPE_REG
p4.To.Reg = v.Reg0()
case ssa.OpMIPS64LoweredAtomicAddconst32, ssa.OpMIPS64LoweredAtomicAddconst64:
// SYNC
// LL (Rarg0), Rout
// ADDV $auxint, Rout, Rtmp
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
// ADDV $auxint, Rout
ll := mips.ALLV
sc := mips.ASCV
if v.Op == ssa.OpMIPS64LoweredAtomicAddconst32 {
ll = mips.ALL
sc = mips.ASC
}
s.Prog(mips.ASYNC)
p := s.Prog(ll)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
p1 := s.Prog(mips.AADDVU)
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = v.AuxInt
p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := s.Prog(sc)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = v.AuxInt
p4.Reg = v.Reg0()
p4.To.Type = obj.TYPE_REG
p4.To.Reg = v.Reg0()
case ssa.OpMIPS64LoweredAtomicCas32, ssa.OpMIPS64LoweredAtomicCas64:
// MOVV $0, Rout
// SYNC
// LL (Rarg0), Rtmp
// BNE Rtmp, Rarg1, 4(PC)
// MOVV Rarg2, Rout
// SC Rout, (Rarg0)
// BEQ Rout, -4(PC)
// SYNC
ll := mips.ALLV
sc := mips.ASCV
if v.Op == ssa.OpMIPS64LoweredAtomicCas32 {
ll = mips.ALL
sc = mips.ASC
}
p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
s.Prog(mips.ASYNC)
p1 := s.Prog(ll)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
p2 := s.Prog(mips.ABNE)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_BRANCH
p3 := s.Prog(mips.AMOVV)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg0()
p4 := s.Prog(sc)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Reg0()
p4.To.Type = obj.TYPE_MEM
p4.To.Reg = v.Args[0].Reg()
p5 := s.Prog(mips.ABEQ)
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1)
p6 := s.Prog(mips.ASYNC)
gc.Patch(p2, p6)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(mips.AMOVB)

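As a reading aid (not part of the commit), here is the Cas sequence above transcribed into a runnable, single-threaded Go model. loadLinked and storeConditional are hypothetical stand-ins for the LL/SC instruction pair; a real SC fails if *p was written between the LL and the SC, which this model cannot express.

package main

import "fmt"

func loadLinked(p *uint64) uint64 { return *p }

func storeConditional(p *uint64, v uint64) bool { *p = v; return true }

// cas mirrors the control flow of the emitted sequence.
func cas(p *uint64, old, new uint64) bool {
	for {
		cur := loadLinked(p) // LL (Rarg0), Rtmp
		if cur != old {      // BNE Rtmp, Rarg1, 4(PC)
			return false // Rout keeps the 0 it was loaded with up front
		}
		if storeConditional(p, new) { // MOVV Rarg2, Rout; SC Rout, (Rarg0)
			return true // SC left 1 in Rout: the store took effect
		}
		// BEQ Rout, -4(PC): SC failed, retry from the LL
	}
}

func main() {
	x := uint64(1)
	fmt.Println(cas(&x, 1, 2), x) // true 2
}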
src/cmd/compile/internal/ssa/gen/MIPS64.rules

@@ -419,6 +419,24 @@
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// atomic intrinsics
(AtomicLoad32 ptr mem) -> (LoweredAtomicLoad32 ptr mem)
(AtomicLoad64 ptr mem) -> (LoweredAtomicLoad64 ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoad64 ptr mem)
(AtomicStore32 ptr val mem) -> (LoweredAtomicStore32 ptr val mem)
(AtomicStore64 ptr val mem) -> (LoweredAtomicStore64 ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (LoweredAtomicStore64 ptr val mem)
(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)
(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem)
(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem)
(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)
// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (SGTU ptr (MOVVconst [0]))
@@ -662,6 +680,10 @@
(MOVWreg (MOVVconst [c])) -> (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) -> (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) -> (MOVVconst [c])
(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem)
(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem)
// constant comparisons
(SGTconst [c] (MOVVconst [d])) && int64(c)>int64(d) -> (MOVVconst [1])

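A hedged illustration of the Storezero peephole in the second hunk (the program is assumed, the op names are from this diff): storing a constant zero matches (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) and is rewritten to LoweredAtomicStorezero64, which stores REGZERO directly instead of materializing 0 into a register.

package main

import "sync/atomic"

var flag uint64

func main() {
	atomic.StoreUint64(&flag, 0) // -> LoweredAtomicStorezero64 on GOARCH=mips64
}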
src/cmd/compile/internal/ssa/gen/MIPS64Ops.go

@@ -147,6 +147,8 @@ func init() {
gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
//fp1flags = regInfo{inputs: []regMask{fp}}
@@ -161,7 +163,7 @@ func init() {
ops := []opData{
// binary ops
{name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
{name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt
{name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
{name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
{name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
{name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed, results hi,lo
@@ -333,6 +335,65 @@ func init() {
faultOnNilArg1: true,
},
// atomic loads.
// load from arg0. arg1=mem.
// returns <value,memory> so they can be properly ordered with other loads.
{name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
// atomic stores.
// store arg1 to arg0. arg2=mem. returns memory.
{name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
// store zero to arg0. arg1=mem. returns memory.
{name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
// atomic exchange.
// store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
// SYNC
// MOVV Rarg1, Rtmp
// LL (Rarg0), Rout
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic add.
// *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
// SYNC
// LL (Rarg0), Rout
// ADDV Rarg1, Rout, Rtmp
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
// ADDV Rarg1, Rout
{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// *arg0 += auxint. arg1=mem. returns <new content of *arg0, memory>. auxint is 32-bit.
{name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// atomic compare and swap.
// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
// if *arg0 == arg1 {
// *arg0 = arg2
// return (true, memory)
// } else {
// return (false, memory)
// }
// MOVV $0, Rout
// SYNC
// LL (Rarg0), Rtmp
// BNE Rtmp, Rarg1, 4(PC)
// MOVV Rarg2, Rout
// SC Rout, (Rarg0)
// BEQ Rout, -4(PC)
// SYNC
{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.

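One non-obvious flag in these entries is resultNotInArgs. A hedged sketch, not from the source, of the hazard it prevents:

// The LL/SC loops write the output register before the last use of the
// inputs, so the output must not share a register with them. If Rout
// were allocated to Rarg1's register:
//
//	LL   (Rarg0), Rout      // clobbers Rarg1
//	ADDV Rarg1, Rout, Rtmp  // adds the loaded value to itself, not arg1
//	SC   Rtmp, (Rarg0)
//	BEQ  Rtmp, -3(PC)
//
// resultNotInArgs tells the register allocator to keep the result
// disjoint from the inputs.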
src/cmd/compile/internal/ssa/opGen.go

@@ -1299,6 +1299,20 @@ const (
OpMIPS64DUFFZERO
OpMIPS64LoweredZero
OpMIPS64LoweredMove
OpMIPS64LoweredAtomicLoad32
OpMIPS64LoweredAtomicLoad64
OpMIPS64LoweredAtomicStore32
OpMIPS64LoweredAtomicStore64
OpMIPS64LoweredAtomicStorezero32
OpMIPS64LoweredAtomicStorezero64
OpMIPS64LoweredAtomicExchange32
OpMIPS64LoweredAtomicExchange64
OpMIPS64LoweredAtomicAdd32
OpMIPS64LoweredAtomicAdd64
OpMIPS64LoweredAtomicAddconst32
OpMIPS64LoweredAtomicAddconst64
OpMIPS64LoweredAtomicCas32
OpMIPS64LoweredAtomicCas64
OpMIPS64LoweredNilCheck
OpMIPS64FPFlagTrue
OpMIPS64FPFlagFalse
@@ -16635,6 +16649,208 @@ var opcodeTable = [...]opInfo{
clobbers: 6, // R1 R2
},
},
{
name: "LoweredAtomicLoad32",
argLen: 2,
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicLoad64",
argLen: 2,
faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicStore32",
argLen: 3,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "LoweredAtomicStore64",
argLen: 3,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "LoweredAtomicStorezero32",
argLen: 2,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "LoweredAtomicStorezero64",
argLen: 2,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
},
},
{
name: "LoweredAtomicExchange32",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicExchange64",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicAdd32",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicAdd64",
argLen: 3,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicAddconst32",
auxType: auxInt32,
argLen: 2,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicAddconst64",
auxType: auxInt64,
argLen: 2,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicCas32",
argLen: 4,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredAtomicCas64",
argLen: 4,
resultNotInArgs: true,
faultOnNilArg0: true,
hasSideEffects: true,
reg: regInfo{
inputs: []inputInfo{
{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
},
outputs: []outputInfo{
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
},
},
},
{
name: "LoweredNilCheck",
argLen: 2,

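The large integers in this generated table are regMask bitmaps; the // comments spell out which registers each mask selects. A hedged helper, not part of the compiler, showing how such a mask decodes:

// Hedged helper, not part of the compiler: a regMask is a bitmap in
// which bit i selects register i of the architecture's register list,
// matching the register names in the table's comments.
package main

import "fmt"

func decode(mask uint64) []int {
	var bits []int
	for i := uint(0); i < 64; i++ {
		if mask&(1<<i) != 0 {
			bits = append(bits, int(i))
		}
	}
	return bits
}

func main() {
	fmt.Println(decode(234881022))           // a general-register input mask from above
	fmt.Println(decode(4611686018695823358)) // a pointer-input mask (also allows SP and SB)
}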
src/cmd/compile/internal/ssa/rewriteMIPS64.go

@@ -41,6 +41,30 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpAnd8_0(v)
case OpAndB:
return rewriteValueMIPS64_OpAndB_0(v)
case OpAtomicAdd32:
return rewriteValueMIPS64_OpAtomicAdd32_0(v)
case OpAtomicAdd64:
return rewriteValueMIPS64_OpAtomicAdd64_0(v)
case OpAtomicCompareAndSwap32:
return rewriteValueMIPS64_OpAtomicCompareAndSwap32_0(v)
case OpAtomicCompareAndSwap64:
return rewriteValueMIPS64_OpAtomicCompareAndSwap64_0(v)
case OpAtomicExchange32:
return rewriteValueMIPS64_OpAtomicExchange32_0(v)
case OpAtomicExchange64:
return rewriteValueMIPS64_OpAtomicExchange64_0(v)
case OpAtomicLoad32:
return rewriteValueMIPS64_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValueMIPS64_OpAtomicLoad64_0(v)
case OpAtomicLoadPtr:
return rewriteValueMIPS64_OpAtomicLoadPtr_0(v)
case OpAtomicStore32:
return rewriteValueMIPS64_OpAtomicStore32_0(v)
case OpAtomicStore64:
return rewriteValueMIPS64_OpAtomicStore64_0(v)
case OpAtomicStorePtrNoWB:
return rewriteValueMIPS64_OpAtomicStorePtrNoWB_0(v)
case OpAvg64u:
return rewriteValueMIPS64_OpAvg64u_0(v)
case OpClosureCall:
@@ -269,6 +293,14 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64AND_0(v)
case OpMIPS64ANDconst:
return rewriteValueMIPS64_OpMIPS64ANDconst_0(v)
case OpMIPS64LoweredAtomicAdd32:
return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32_0(v)
case OpMIPS64LoweredAtomicAdd64:
return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64_0(v)
case OpMIPS64LoweredAtomicStore32:
return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32_0(v)
case OpMIPS64LoweredAtomicStore64:
return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64_0(v)
case OpMIPS64MOVBUload:
return rewriteValueMIPS64_OpMIPS64MOVBUload_0(v)
case OpMIPS64MOVBUreg:
@@ -755,6 +787,196 @@ func rewriteValueMIPS64_OpAndB_0(v *Value) bool {
return true
}
}
func rewriteValueMIPS64_OpAtomicAdd32_0(v *Value) bool {
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (LoweredAtomicAdd32 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicAdd32)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicAdd64_0(v *Value) bool {
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (LoweredAtomicAdd64 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicAdd64)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicCompareAndSwap32_0(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas32 ptr old new_ mem)
for {
_ = v.Args[3]
ptr := v.Args[0]
old := v.Args[1]
new_ := v.Args[2]
mem := v.Args[3]
v.reset(OpMIPS64LoweredAtomicCas32)
v.AddArg(ptr)
v.AddArg(old)
v.AddArg(new_)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicCompareAndSwap64_0(v *Value) bool {
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas64 ptr old new_ mem)
for {
_ = v.Args[3]
ptr := v.Args[0]
old := v.Args[1]
new_ := v.Args[2]
mem := v.Args[3]
v.reset(OpMIPS64LoweredAtomicCas64)
v.AddArg(ptr)
v.AddArg(old)
v.AddArg(new_)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicExchange32_0(v *Value) bool {
// match: (AtomicExchange32 ptr val mem)
// cond:
// result: (LoweredAtomicExchange32 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicExchange32)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicExchange64_0(v *Value) bool {
// match: (AtomicExchange64 ptr val mem)
// cond:
// result: (LoweredAtomicExchange64 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicExchange64)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicLoad32_0(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
// result: (LoweredAtomicLoad32 ptr mem)
for {
_ = v.Args[1]
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpMIPS64LoweredAtomicLoad32)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicLoad64_0(v *Value) bool {
// match: (AtomicLoad64 ptr mem)
// cond:
// result: (LoweredAtomicLoad64 ptr mem)
for {
_ = v.Args[1]
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpMIPS64LoweredAtomicLoad64)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicLoadPtr_0(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
// result: (LoweredAtomicLoad64 ptr mem)
for {
_ = v.Args[1]
ptr := v.Args[0]
mem := v.Args[1]
v.reset(OpMIPS64LoweredAtomicLoad64)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicStore32_0(v *Value) bool {
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (LoweredAtomicStore32 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicStore32)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicStore64_0(v *Value) bool {
// match: (AtomicStore64 ptr val mem)
// cond:
// result: (LoweredAtomicStore64 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicStore64)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAtomicStorePtrNoWB_0(v *Value) bool {
// match: (AtomicStorePtrNoWB ptr val mem)
// cond:
// result: (LoweredAtomicStore64 ptr val mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicStore64)
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
return true
}
}
func rewriteValueMIPS64_OpAvg64u_0(v *Value) bool {
b := v.Block
_ = b
@@ -3486,6 +3708,98 @@ func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool {
}
return false
}
func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32_0(v *Value) bool {
// match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem)
// cond: is32Bit(c)
// result: (LoweredAtomicAddconst32 [c] ptr mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64LoweredAtomicAddconst32)
v.AuxInt = c
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64_0(v *Value) bool {
// match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem)
// cond: is32Bit(c)
// result: (LoweredAtomicAddconst64 [c] ptr mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(is32Bit(c)) {
break
}
v.reset(OpMIPS64LoweredAtomicAddconst64)
v.AuxInt = c
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32_0(v *Value) bool {
// match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem)
// cond:
// result: (LoweredAtomicStorezero32 ptr mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
}
if v_1.AuxInt != 0 {
break
}
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicStorezero32)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64_0(v *Value) bool {
// match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem)
// cond:
// result: (LoweredAtomicStorezero64 ptr mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpMIPS64MOVVconst {
break
}
if v_1.AuxInt != 0 {
break
}
mem := v.Args[2]
v.reset(OpMIPS64LoweredAtomicStorezero64)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValueMIPS64_OpMIPS64MOVBUload_0(v *Value) bool {
// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)

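These matcher functions are generated from MIPS64.rules and compose into a two-step rewrite. A hedged end-to-end example (the program is assumed, the op names are from this diff): the add below first lowers AtomicAdd64 to LoweredAtomicAdd64, and because the delta is a small constant the matcher above then folds it into LoweredAtomicAddconst64 [1], freeing the register that would have held the 1.

package main

import (
	"fmt"
	"sync/atomic"
)

var counter uint64

func main() {
	atomic.AddUint64(&counter, 1)
	fmt.Println(atomic.LoadUint64(&counter)) // 1
}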
test/intrinsic.go

@@ -1,5 +1,5 @@
// errorcheck -0 -d=ssa/intrinsics/debug
-// +build amd64 arm64
+// +build amd64 arm64 mips mipsle mips64 mips64le ppc64 ppc64le s390x
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style