Mirror of https://github.com/golang/go
cmd/compile: intrinsify runtime/internal/atomic.{And,Or}{8,} on RISCV64
The 32 bit versions are easily implemented with a single instruction, while the 8 bit versions require a bit more effort, but use the same atomic instructions via rewrite rules.

Change-Id: I42e8d457b239c8f75e39a8e282fc88c1bb292a99
Reviewed-on: https://go-review.googlesource.com/c/go/+/268098
Trust: Joel Sing <joel@sing.id.au>
Run-TryBot: Joel Sing <joel@sing.id.au>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
This commit is contained in: commit 497feff168 (parent 00cb841b83)
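The core trick of this change is the 8-bit-to-32-bit emulation expressed by the rewrite rules below. Here is a rough equivalent in plain Go (illustrative only, with hypothetical helper names; the compiler emits a single AMOANDW/AMOORW rather than the CAS loop used here). It assumes little-endian byte order, which holds on RISC-V:

	package main

	import (
		"fmt"
		"sync/atomic"
		"unsafe"
	)

	// or8 emulates an atomic *addr |= val using a 32-bit atomic on the
	// aligned word containing addr. The other three bytes OR with zero
	// and are therefore unchanged.
	func or8(addr *uint8, val uint8) {
		p := uintptr(unsafe.Pointer(addr))
		word := (*uint32)(unsafe.Pointer(p &^ 3)) // ptr &^ 3
		shift := (p & 3) * 8                      // (ptr & 3) * 8
		bits := uint32(val) << shift
		for {
			old := atomic.LoadUint32(word)
			if atomic.CompareAndSwapUint32(word, old, old|bits) {
				return
			}
		}
	}

	// and8 emulates an atomic *addr &= val. Inverting the byte before
	// the shift and the whole word after it yields a mask whose other
	// three bytes are all ones, so they AND to their previous values.
	func and8(addr *uint8, val uint8) {
		p := uintptr(unsafe.Pointer(addr))
		word := (*uint32)(unsafe.Pointer(p &^ 3))
		shift := (p & 3) * 8
		mask := ^((uint32(val) ^ 0xff) << shift) // ^((val ^ 0xff) << shift)
		for {
			old := atomic.LoadUint32(word)
			if atomic.CompareAndSwapUint32(word, old, old&mask) {
				return
			}
		}
	}

	func main() {
		var word uint32 // 32-bit aligned backing store
		b := (*[4]uint8)(unsafe.Pointer(&word))
		b[1] = 0xaa
		or8(&b[1], 0x05)  // 0xaa | 0x05 = 0xaf
		and8(&b[1], 0x8f) // 0xaf & 0x8f = 0x8f
		fmt.Printf("%#x %#x\n", b[1], word) // 0x8f 0x8f00: bytes 0, 2, 3 untouched
	}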
@@ -510,6 +510,14 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		p6 := s.Prog(obj.ANOP)
 		p2.To.SetTarget(p6)
 
+	case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
+		p := s.Prog(v.Op.Asm())
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = v.Args[1].Reg()
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = v.Args[0].Reg()
+		p.RegTo2 = riscv.REG_ZERO
+
 	case ssa.OpRISCV64LoweredZero:
 		mov, sz := largestMove(v.AuxInt)
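Note how the lowering above puts the value register in From and the memory operand in To, and hard-wires RegTo2, the AMO's destination register for the old memory value, to riscv.REG_ZERO: the intrinsified And/Or functions return nothing, so the previous contents are simply discarded into x0.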
@@ -564,12 +564,28 @@
 (AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
 (AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
 
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
+(AtomicAnd8 ptr val mem) =>
+	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
+		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
+			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+
 (AtomicCompareAndSwap32 ...) => (LoweredAtomicCas32 ...)
 (AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
 
 (AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
 (AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
 
+// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
+(AtomicOr8 ptr val mem) =>
+	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
+		(SLL <typ.UInt32> (ZeroExt8to32 val)
+			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
+
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
+
 // Conditional branches
 (If cond yes no) => (BNEZ cond yes no)
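The And8 mask expression is the subtle one. A quick standalone check (illustrative, not part of the commit) that ^((val ^ 0xff) << shift) really is "val's byte in position, ones everywhere else", so the 32-bit AND preserves the neighbouring bytes:

	package main

	import "fmt"

	func main() {
		val := uint32(0x0f) // zero-extended 8-bit operand
		shift := uint(8)    // byte 1 of the containing word: (ptr&3)*8 = 8
		m1 := ^((val ^ 0xff) << shift)                  // the rule's mask
		m2 := (val << shift) | ^(uint32(0xff) << shift) // byte in place, ones elsewhere
		fmt.Printf("%#x %#x\n", m1, m2)                 // 0xffff0fff 0xffff0fff
	}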
@@ -126,6 +126,7 @@ func init() {
 		gp11sb   = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
 		gpxchg   = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}}
 		gpcas    = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}}
+		gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}}
 
 		fp11     = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
 		fp21     = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
@@ -335,7 +336,7 @@ func init() {
 		{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
 
 		// Atomic stores.
-		// store arg1 to arg0. arg2=mem. returns memory.
+		// store arg1 to *arg0. arg2=mem. returns memory.
 		{name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
 		{name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
 		{name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
@@ -367,6 +368,11 @@ func init() {
 		{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
 		{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
 
+		// Atomic 32 bit AND/OR.
+		// *arg0 &= (|=) arg1. arg2=mem. returns nil.
+		{name: "LoweredAtomicAnd32", argLength: 3, reg: gpatomic, asm: "AMOANDW", faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicOr32", argLength: 3, reg: gpatomic, asm: "AMOORW", faultOnNilArg0: true, hasSideEffects: true},
+
 		// Lowering pass-throughs
 		{name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
 		{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block
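Two details worth noting in these definitions: gpatomic (introduced in the regInfo hunk above) declares inputs but no outputs, matching the "returns nil" comment, and hasSideEffects: true keeps the ops alive through dead-code elimination even though no later value consumes a result from them.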
@@ -2149,6 +2149,8 @@ const (
 	OpRISCV64LoweredAtomicAdd64
 	OpRISCV64LoweredAtomicCas32
 	OpRISCV64LoweredAtomicCas64
+	OpRISCV64LoweredAtomicAnd32
+	OpRISCV64LoweredAtomicOr32
 	OpRISCV64LoweredNilCheck
 	OpRISCV64LoweredGetClosurePtr
 	OpRISCV64LoweredGetCallerSP
@@ -28722,6 +28724,32 @@ var opcodeTable = [...]opInfo{
 			},
 		},
 	},
+	{
+		name:           "LoweredAtomicAnd32",
+		argLen:         3,
+		faultOnNilArg0: true,
+		hasSideEffects: true,
+		asm:            riscv.AAMOANDW,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 1073741812},          // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+				{0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+			},
+		},
+	},
+	{
+		name:           "LoweredAtomicOr32",
+		argLen:         3,
+		faultOnNilArg0: true,
+		hasSideEffects: true,
+		asm:            riscv.AAMOORW,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{1, 1073741812},          // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+				{0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+			},
+		},
+	},
 	{
 		name:           "LoweredNilCheck",
 		argLen:         2,
@@ -52,6 +52,11 @@ func rewriteValueRISCV64(v *Value) bool {
 	case OpAtomicAdd64:
 		v.Op = OpRISCV64LoweredAtomicAdd64
 		return true
+	case OpAtomicAnd32:
+		v.Op = OpRISCV64LoweredAtomicAnd32
+		return true
+	case OpAtomicAnd8:
+		return rewriteValueRISCV64_OpAtomicAnd8(v)
 	case OpAtomicCompareAndSwap32:
 		v.Op = OpRISCV64LoweredAtomicCas32
 		return true
@@ -76,6 +81,11 @@ func rewriteValueRISCV64(v *Value) bool {
 	case OpAtomicLoadPtr:
 		v.Op = OpRISCV64LoweredAtomicLoad64
 		return true
+	case OpAtomicOr32:
+		v.Op = OpRISCV64LoweredAtomicOr32
+		return true
+	case OpAtomicOr8:
+		return rewriteValueRISCV64_OpAtomicOr8(v)
 	case OpAtomicStore32:
 		v.Op = OpRISCV64LoweredAtomicStore32
 		return true
@@ -681,6 +691,71 @@ func rewriteValueRISCV64_OpAddr(v *Value) bool {
 		return true
 	}
 }
+func rewriteValueRISCV64_OpAtomicAnd8(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (AtomicAnd8 ptr val mem)
+	// result: (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr) (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val)) (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
+	for {
+		ptr := v_0
+		val := v_1
+		mem := v_2
+		v.reset(OpRISCV64LoweredAtomicAnd32)
+		v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr)
+		v0.AuxInt = int64ToAuxInt(^3)
+		v0.AddArg(ptr)
+		v1 := b.NewValue0(v.Pos, OpRISCV64NOT, typ.UInt32)
+		v2 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32)
+		v3 := b.NewValue0(v.Pos, OpRISCV64XORI, typ.UInt32)
+		v3.AuxInt = int64ToAuxInt(0xff)
+		v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+		v4.AddArg(val)
+		v3.AddArg(v4)
+		v5 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+		v5.AuxInt = int64ToAuxInt(3)
+		v6 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64)
+		v6.AuxInt = int64ToAuxInt(3)
+		v6.AddArg(ptr)
+		v5.AddArg(v6)
+		v2.AddArg2(v3, v5)
+		v1.AddArg(v2)
+		v.AddArg3(v0, v1, mem)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpAtomicOr8(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	typ := &b.Func.Config.Types
+	// match: (AtomicOr8 ptr val mem)
+	// result: (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
+	for {
+		ptr := v_0
+		val := v_1
+		mem := v_2
+		v.reset(OpRISCV64LoweredAtomicOr32)
+		v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr)
+		v0.AuxInt = int64ToAuxInt(^3)
+		v0.AddArg(ptr)
+		v1 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32)
+		v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+		v2.AddArg(val)
+		v3 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+		v3.AuxInt = int64ToAuxInt(3)
+		v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64)
+		v4.AuxInt = int64ToAuxInt(3)
+		v4.AddArg(ptr)
+		v3.AddArg(v4)
+		v1.AddArg2(v2, v3)
+		v.AddArg3(v0, v1, mem)
+		return true
+	}
+}
 func rewriteValueRISCV64_OpAvg64u(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
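The two rewriteValueRISCV64_OpAtomic*8 functions above are not hand-written: rewriteRISCV64.go is generated by the SSA rulegen tool, so they are the mechanical expansion of the AtomicAnd8/AtomicOr8 rules added to the rules file earlier in this diff.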