[dev.ssa] cmd/compile: support NaCl in SSA for ARM
NaCl code runs in a sandbox, which restricts the instructions it may use (https://developer.chrome.com/native-client/reference/sandbox_internals/arm-32-bit-sandbox). As in the legacy backend, on NaCl:

- don't use R9, which NaCl uses as its "thread pointer";
- don't use Duff's device;
- don't use indexed loads/stores;
- mark R12 as clobbered for DIV/MOD, because the assembler rewrites them to runtime calls, which clobber R12 on NaCl;
- the remaining restrictions are satisfied by the assembler.

Enable the SSA-specific tests on nacl/arm and disable the non-SSA ones.

Updates #15365.

Change-Id: I9262693ec6756b89ca29d3ae4e52a96fe5403b02
Reviewed-on: https://go-review.googlesource.com/24859
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Parent: 7d70f84f54
Commit: 6b6de15d32
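Background for the register tricks in the diff below: the SSA backend tracks register sets as bit masks, one bit per register number. The following standalone sketch (hypothetical names, not the compiler's actual types) shows the two mask operations the change relies on: adding R12 to a clobber set with `|=`, and removing R9 from an allocatable set with Go's AND-NOT operator `&^=`.

package main

import "fmt"

// regMask mirrors the mask convention used in the diff: ARM register Rn
// occupies bit n of a uint64.
type regMask uint64

func main() {
    const (
        r9  regMask = 1 << 9  // NaCl's "thread pointer" on arm
        r12 regMask = 1 << 12 // clobbered by DIV/MOD runtime calls on NaCl
    )

    allocatable := regMask(0xffff) // pretend R0-R15 start out allocatable
    allocatable &^= r9             // as regalloc.go now does on nacl/arm

    var clobbers regMask
    clobbers |= r12 // as NewConfig now does for OpARMDIV/DIVU/MOD/MODU

    fmt.Printf("allocatable: %#x\n", allocatable) // 0xfdff (bit 9 cleared)
    fmt.Printf("clobbers:    %#x\n", clobbers)    // 0x1000 (bit 12 set)
}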
@@ -37,12 +37,7 @@ func shouldssa(fn *Node) bool {
         if os.Getenv("SSATEST") == "" {
             return false
         }
-    case "arm":
-        // nacl/arm doesn't work yet
-        if obj.Getgoos() == "nacl" && os.Getenv("SSATEST") == "" {
-            return false
-        }
-    case "amd64":
+    case "amd64", "arm":
         // Generally available.
     }
     if !ssaEnabled {

@@ -30,6 +30,7 @@ type Config struct {
     ctxt            *obj.Link // Generic arch information
     optimize        bool      // Do optimization
     noDuffDevice    bool      // Don't use Duff's device
+    nacl            bool      // GOOS=nacl
     sparsePhiCutoff uint64    // Sparse phi location algorithm used above this #blocks*#variables score
     curFunc         *Func
 
@@ -175,13 +176,25 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
     }
     c.ctxt = ctxt
     c.optimize = optimize
+    c.nacl = obj.Getgoos() == "nacl"
 
-    // Don't use Duff's device on Plan 9, because floating
+    // Don't use Duff's device on Plan 9 AMD64, because floating
     // point operations are not allowed in note handler.
-    if obj.Getgoos() == "plan9" {
+    if obj.Getgoos() == "plan9" && arch == "amd64" {
         c.noDuffDevice = true
     }
 
+    if c.nacl {
+        c.noDuffDevice = true // Don't use Duff's device on NaCl
+
+        // ARM assembler rewrites DIV/MOD to runtime calls, which
+        // clobber R12 on nacl
+        opcodeTable[OpARMDIV].reg.clobbers |= 1 << 12  // R12
+        opcodeTable[OpARMDIVU].reg.clobbers |= 1 << 12 // R12
+        opcodeTable[OpARMMOD].reg.clobbers |= 1 << 12  // R12
+        opcodeTable[OpARMMODU].reg.clobbers |= 1 << 12 // R12
+    }
+
     // Assign IDs to preallocated values/blocks.
     for i := range c.values {
         c.values[i].ID = ID(i)

@@ -298,12 +298,12 @@
 // 4 and 128 are magic constants, see runtime/mkduff.go
 (Zero [s] ptr mem)
     && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
-    && SizeAndAlign(s).Align()%4 == 0 ->
+    && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice ->
     (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
 
 // Large zeroing uses a loop
 (Zero [s] ptr mem)
-    && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512
+    && SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice)
     && SizeAndAlign(s).Align()%4 == 0 ->
     (LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
 
@@ -339,12 +339,12 @@
 // 8 and 128 are magic constants, see runtime/mkduff.go
 (Move [s] dst src mem)
     && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512
-    && SizeAndAlign(s).Align()%4 == 0 ->
+    && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice ->
     (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
 
 // Large move uses a loop
 (Move [s] dst src mem)
-    && SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512
+    && SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice)
     && SizeAndAlign(s).Align()%4 == 0 ->
     (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
 
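The DUFFZERO/DUFFCOPY arguments in the rules above are entry offsets into the unrolled runtime routines. A quick sketch of the arithmetic, assuming the runtime/mkduff.go layout the comments reference (128 unrolled word iterations, 4 bytes of code per zeroing step and 8 per copying step):

package main

import "fmt"

// Entry offsets into the unrolled Duff routines for an n-byte block
// (n must be a multiple of 4, as the rule conditions require).
func duffzeroOffset(n int64) int64 { return 4 * (128 - n/4) }
func duffcopyOffset(n int64) int64 { return 8 * (128 - n/4) }

func main() {
    // Zeroing 512 bytes needs all 128 word iterations, so enter at
    // offset 0; zeroing 8 bytes skips the first 126 iterations.
    fmt.Println(duffzeroOffset(512), duffzeroOffset(8)) // 0 504
    fmt.Println(duffcopyOffset(512), duffcopyOffset(8)) // 0 1008
}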
@@ -1128,14 +1128,14 @@
 (CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c])
 
 // use indexed loads and stores
-(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
-(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
-(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
-(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
 
 // constant folding in indexed loads and stores
 (MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
 
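Why the `!config.nacl` guards above: as the sandbox document linked in the commit message describes it, NaCl/ARM requires every load/store address to come from a masked base register, which rules out the register-indexed and shifted-index addressing modes these rules would otherwise emit.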
@@ -490,6 +490,9 @@ func (s *regAllocState) init(f *Func) {
             s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch)
         }
     }
+    if s.f.Config.nacl && s.f.Config.arch == "arm" {
+        s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
+    }
 
     s.regs = make([]regState, s.numRegs)
     s.values = make([]valState, f.NumValues())

@@ -8382,7 +8382,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWloadidx ptr idx mem)
     for {
         if v.AuxInt != 0 {

@@ -8396,7 +8396,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         ptr := v_0.Args[0]
         idx := v_0.Args[1]
         mem := v.Args[1]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWloadidx)

@@ -8406,7 +8406,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWloadshiftLL ptr idx [c] mem)
     for {
         if v.AuxInt != 0 {

@@ -8421,7 +8421,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         idx := v_0.Args[1]
         c := v_0.AuxInt
         mem := v.Args[1]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWloadshiftLL)

@@ -8432,7 +8432,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWloadshiftRL ptr idx [c] mem)
     for {
         if v.AuxInt != 0 {

@@ -8447,7 +8447,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         idx := v_0.Args[1]
         c := v_0.AuxInt
         mem := v.Args[1]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWloadshiftRL)

@@ -8458,7 +8458,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWloadshiftRA ptr idx [c] mem)
     for {
         if v.AuxInt != 0 {

@@ -8473,7 +8473,7 @@ func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
         idx := v_0.Args[1]
         c := v_0.AuxInt
         mem := v.Args[1]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWloadshiftRA)

@@ -8875,7 +8875,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWstoreidx ptr idx val mem)
     for {
         if v.AuxInt != 0 {

@@ -8890,7 +8890,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         idx := v_0.Args[1]
         val := v.Args[1]
         mem := v.Args[2]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWstoreidx)

@@ -8901,7 +8901,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWstoreshiftLL ptr idx [c] val mem)
     for {
         if v.AuxInt != 0 {

@@ -8917,7 +8917,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         c := v_0.AuxInt
         val := v.Args[1]
         mem := v.Args[2]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWstoreshiftLL)

@@ -8929,7 +8929,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWstoreshiftRL ptr idx [c] val mem)
     for {
         if v.AuxInt != 0 {

@@ -8945,7 +8945,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         c := v_0.AuxInt
         val := v.Args[1]
         mem := v.Args[2]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWstoreshiftRL)

@@ -8957,7 +8957,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         return true
     }
     // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
-    // cond: sym == nil
+    // cond: sym == nil && !config.nacl
     // result: (MOVWstoreshiftRA ptr idx [c] val mem)
     for {
         if v.AuxInt != 0 {

@@ -8973,7 +8973,7 @@ func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
         c := v_0.AuxInt
         val := v.Args[1]
         mem := v.Args[2]
-        if !(sym == nil) {
+        if !(sym == nil && !config.nacl) {
             break
         }
         v.reset(OpARMMOVWstoreshiftRA)

@@ -10670,14 +10670,14 @@ func rewriteValueARM_OpMove(v *Value, config *Config) bool {
         return true
     }
     // match: (Move [s] dst src mem)
-    // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+    // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
     // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
     for {
         s := v.AuxInt
         dst := v.Args[0]
         src := v.Args[1]
         mem := v.Args[2]
-        if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+        if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
             break
         }
         v.reset(OpARMDUFFCOPY)

@@ -10688,14 +10688,14 @@ func rewriteValueARM_OpMove(v *Value, config *Config) bool {
         return true
     }
     // match: (Move [s] dst src mem)
-    // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+    // cond: SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0
     // result: (LoweredMove dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()]) mem)
     for {
         s := v.AuxInt
         dst := v.Args[0]
         src := v.Args[1]
         mem := v.Args[2]
-        if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+        if !(SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0) {
             break
         }
         v.reset(OpARMLoweredMove)

@@ -16786,13 +16786,13 @@ func rewriteValueARM_OpZero(v *Value, config *Config) bool {
         return true
     }
     // match: (Zero [s] ptr mem)
-    // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0
+    // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
     // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
     for {
         s := v.AuxInt
         ptr := v.Args[0]
         mem := v.Args[1]
-        if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) {
+        if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
             break
         }
         v.reset(OpARMDUFFZERO)

@@ -16805,13 +16805,13 @@ func rewriteValueARM_OpZero(v *Value, config *Config) bool {
         return true
     }
     // match: (Zero [s] ptr mem)
-    // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0
+    // cond: SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0
     // result: (LoweredZero ptr (ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem)
     for {
         s := v.AuxInt
         ptr := v.Args[0]
         mem := v.Args[1]
-        if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) {
+        if !(SizeAndAlign(s).Size()%4 == 0 && (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) && SizeAndAlign(s).Align()%4 == 0) {
             break
         }
         v.reset(OpARMLoweredZero)

@@ -1,4 +1,4 @@
-// +build !amd64,!arm nacl,arm
+// +build !amd64,!arm
 // errorcheck -0 -l -live -wb=0
 
 // Copyright 2014 The Go Authors. All rights reserved.

@@ -1,4 +1,4 @@
-// +build amd64 arm,!nacl
+// +build amd64 arm
 // errorcheck -0 -l -live -wb=0
 
 // Copyright 2014 The Go Authors. All rights reserved.

@@ -2,7 +2,7 @@
 // Fails on ppc64x because of incomplete optimization.
 // See issues 9058.
 // Same reason for mips64x and s390x.
-// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm nacl,arm
+// +build !ppc64,!ppc64le,!mips64,!mips64le,!amd64,!s390x,!arm
 
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style

@@ -1,5 +1,5 @@
 // errorcheck -0 -d=nil
-// +build amd64 arm,!nacl
+// +build amd64 arm
 
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style

@@ -1,4 +1,4 @@
-// +build !amd64,!arm nacl,arm
+// +build !amd64,!arm
 // errorcheck -0 -d=append,slice
 
 // Copyright 2015 The Go Authors. All rights reserved.
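Reading the test build tags above: in a `// +build` line a space means OR and a comma means AND, so `!amd64,!arm nacl,arm` is (!amd64 && !arm) || (nacl && arm). Dropping the `nacl,arm` term disables these legacy-backend tests on nacl/arm, while dropping `!nacl` from `amd64 arm,!nacl` enables the SSA variants there, matching the commit message.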