cmd/compile: add some generic composite type optimizations

Propagate values through some wide Zero/Move operations. Among
other things this allows us to optimize some kinds of array
initialization. For example, the following code no longer
requires a temporary to be allocated on the stack. Instead, it
writes the values directly into the return value.

func f(i uint32) [4]uint32 {
    return [4]uint32{i, i+1, i+2, i+3}
}

The return value is unnecessarily cleared, but removing that is
probably a task for dead store analysis (I think it needs to
be able to match multiple Store ops to wide Zero ops).

In order to reliably remove stack variables that are rendered
unnecessary by these new rules, I've added a new generic version
of the unread autos elimination pass.
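
For illustration (a sketch, not a test from this change): once the new
rules write values like those above directly into the return value, a
temporary such as t below is only ever stored to, and the new pass
deletes it:

func g(i uint32) [4]uint32 {
    var t [4]uint32 // address reaches only VarDef/Store ops
    t[0], t[1], t[2], t[3] = i, i+1, i+2, i+3
    return t
}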

These rules are triggered more than 5000 times when building and
testing the standard library.

Updates #15925 (fixes for arrays of up to 4 elements).
Updates #24386 (fixes for up to 4 kept elements).
Updates #24416.

compilebench results:

name       old time/op       new time/op       delta
Template         353ms ± 5%        359ms ± 3%    ~     (p=0.143 n=10+10)
Unicode          219ms ± 1%        217ms ± 4%    ~     (p=0.740 n=7+10)
GoTypes          1.26s ± 1%        1.26s ± 2%    ~     (p=0.549 n=9+10)
Compiler         6.00s ± 1%        6.08s ± 1%  +1.42%  (p=0.000 n=9+8)
SSA              15.3s ± 2%        15.6s ± 1%  +2.43%  (p=0.000 n=10+10)
Flate            237ms ± 2%        240ms ± 2%  +1.31%  (p=0.015 n=10+10)
GoParser         285ms ± 1%        285ms ± 1%    ~     (p=0.878 n=8+8)
Reflect          797ms ± 3%        807ms ± 2%    ~     (p=0.065 n=9+10)
Tar              334ms ± 0%        335ms ± 4%    ~     (p=0.460 n=8+10)
XML              419ms ± 0%        423ms ± 1%  +0.91%  (p=0.001 n=7+9)
StdCmd           46.0s ± 0%        46.4s ± 0%  +0.85%  (p=0.000 n=9+9)

name       old user-time/op  new user-time/op  delta
Template         337ms ± 3%        346ms ± 5%    ~     (p=0.053 n=9+10)
Unicode          205ms ±10%        205ms ± 8%    ~     (p=1.000 n=10+10)
GoTypes          1.22s ± 2%        1.21s ± 3%    ~     (p=0.436 n=10+10)
Compiler         5.85s ± 1%        5.93s ± 0%  +1.46%  (p=0.000 n=10+8)
SSA              14.9s ± 1%        15.3s ± 1%  +2.62%  (p=0.000 n=10+10)
Flate            229ms ± 4%        228ms ± 6%    ~     (p=0.796 n=10+10)
GoParser         271ms ± 3%        275ms ± 4%    ~     (p=0.165 n=10+10)
Reflect          779ms ± 5%        775ms ± 2%    ~     (p=0.971 n=10+10)
Tar              317ms ± 4%        319ms ± 5%    ~     (p=0.853 n=10+10)
XML              404ms ± 4%        409ms ± 5%    ~     (p=0.436 n=10+10)

name       old alloc/op      new alloc/op      delta
Template        34.9MB ± 0%       35.0MB ± 0%  +0.26%  (p=0.000 n=10+10)
Unicode         29.3MB ± 0%       29.3MB ± 0%  +0.02%  (p=0.000 n=10+10)
GoTypes          115MB ± 0%        115MB ± 0%  +0.30%  (p=0.000 n=10+10)
Compiler         519MB ± 0%        521MB ± 0%  +0.30%  (p=0.000 n=10+10)
SSA             1.55GB ± 0%       1.57GB ± 0%  +1.34%  (p=0.000 n=10+9)
Flate           24.1MB ± 0%       24.2MB ± 0%  +0.10%  (p=0.000 n=10+10)
GoParser        28.1MB ± 0%       28.1MB ± 0%  +0.07%  (p=0.000 n=10+10)
Reflect         78.7MB ± 0%       78.7MB ± 0%  +0.03%  (p=0.000 n=8+10)
Tar             34.4MB ± 0%       34.5MB ± 0%  +0.12%  (p=0.000 n=10+10)
XML             43.2MB ± 0%       43.2MB ± 0%  +0.13%  (p=0.000 n=10+10)

name       old allocs/op     new allocs/op     delta
Template          330k ± 0%         330k ± 0%  -0.01%  (p=0.017 n=10+10)
Unicode           337k ± 0%         337k ± 0%  +0.01%  (p=0.000 n=9+10)
GoTypes          1.15M ± 0%        1.15M ± 0%  +0.03%  (p=0.000 n=10+10)
Compiler         4.77M ± 0%        4.77M ± 0%  +0.03%  (p=0.000 n=9+10)
SSA              12.5M ± 0%        12.6M ± 0%  +1.16%  (p=0.000 n=10+10)
Flate             221k ± 0%         221k ± 0%  +0.05%  (p=0.000 n=9+10)
GoParser          275k ± 0%         275k ± 0%  +0.01%  (p=0.014 n=10+9)
Reflect           944k ± 0%         944k ± 0%  -0.02%  (p=0.000 n=10+10)
Tar               324k ± 0%         323k ± 0%  -0.12%  (p=0.000 n=10+10)
XML               384k ± 0%         384k ± 0%  -0.01%  (p=0.001 n=10+10)

name       old object-bytes  new object-bytes  delta
Template         476kB ± 0%        476kB ± 0%  -0.04%  (p=0.000 n=10+10)
Unicode          218kB ± 0%        218kB ± 0%    ~     (all equal)
GoTypes         1.58MB ± 0%       1.58MB ± 0%  -0.04%  (p=0.000 n=10+10)
Compiler        6.25MB ± 0%       6.24MB ± 0%  -0.09%  (p=0.000 n=10+10)
SSA             15.9MB ± 0%       16.1MB ± 0%  +1.22%  (p=0.000 n=10+10)
Flate            304kB ± 0%        304kB ± 0%  -0.13%  (p=0.000 n=10+10)
GoParser         370kB ± 0%        370kB ± 0%  -0.00%  (p=0.000 n=10+10)
Reflect         1.27MB ± 0%       1.27MB ± 0%  -0.12%  (p=0.000 n=10+10)
Tar              421kB ± 0%        419kB ± 0%  -0.64%  (p=0.000 n=10+10)
XML              518kB ± 0%        517kB ± 0%  -0.12%  (p=0.000 n=10+10)

name       old export-bytes  new export-bytes  delta
Template        16.7kB ± 0%       16.7kB ± 0%    ~     (all equal)
Unicode         6.52kB ± 0%       6.52kB ± 0%    ~     (all equal)
GoTypes         29.2kB ± 0%       29.2kB ± 0%    ~     (all equal)
Compiler        88.0kB ± 0%       88.0kB ± 0%    ~     (all equal)
SSA              109kB ± 0%        109kB ± 0%    ~     (all equal)
Flate           4.49kB ± 0%       4.49kB ± 0%    ~     (all equal)
GoParser        8.10kB ± 0%       8.10kB ± 0%    ~     (all equal)
Reflect         7.71kB ± 0%       7.71kB ± 0%    ~     (all equal)
Tar             9.15kB ± 0%       9.15kB ± 0%    ~     (all equal)
XML             12.3kB ± 0%       12.3kB ± 0%    ~     (all equal)

name       old text-bytes    new text-bytes    delta
HelloSize        676kB ± 0%        672kB ± 0%  -0.59%  (p=0.000 n=10+10)
CmdGoSize       7.26MB ± 0%       7.24MB ± 0%  -0.18%  (p=0.000 n=10+10)

name       old data-bytes    new data-bytes    delta
HelloSize       10.2kB ± 0%       10.2kB ± 0%    ~     (all equal)
CmdGoSize        248kB ± 0%        248kB ± 0%    ~     (all equal)

name       old bss-bytes     new bss-bytes     delta
HelloSize        125kB ± 0%        125kB ± 0%    ~     (all equal)
CmdGoSize        145kB ± 0%        145kB ± 0%    ~     (all equal)

name       old exe-bytes     new exe-bytes     delta
HelloSize       1.46MB ± 0%       1.45MB ± 0%  -0.31%  (p=0.000 n=10+10)
CmdGoSize       14.7MB ± 0%       14.7MB ± 0%  -0.17%  (p=0.000 n=10+10)

Change-Id: Ic72b0c189dd542f391e1c9ab88a76e9148dc4285
Reviewed-on: https://go-review.googlesource.com/106495
Run-TryBot: Michael Munday <mike.munday@ibm.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Author: Michael Munday
Date:   2018-04-11 22:47:24 +01:00
Commit: f31a18ded4 (parent 098ca846c7)

8 changed files with 3485 additions and 169 deletions


@@ -371,6 +371,7 @@ var passes = [...]pass{
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},
{name: "generic deadcode", fn: deadcode},
{name: "check bce", fn: checkbce},
{name: "branchelim", fn: branchelim},


@@ -133,6 +133,153 @@ func dse(f *Func) {
}
}
// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
// we track the operations that the address of each auto reaches and if it only
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
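// For example (hypothetical source, not part of this diff), given
//
//	func g() {
//		var t [4]int // auto written but never read
//		t[0] = 1
//	}
//
// the address of t reaches only a VarDef and a Store; the Store is
// rewritten to OpCopy below and t is then removed by deadcode.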
func elimDeadAutosGeneric(f *Func) {
addr := make(map[*Value]GCNode) // values that the address of the auto reaches
elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is unused
used := make(map[GCNode]bool) // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
args := v.Args
switch v.Op {
case OpAddr:
// Propagate the address if it points to an auto.
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
return
}
if addr[v] == nil {
addr[v] = n
changed = true
}
return
case OpVarDef, OpVarKill:
// v should be eliminated if we eliminate the auto.
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
return
}
if elim[v] == nil {
elim[v] = n
changed = true
}
return
case OpVarLive:
// Don't delete the auto if it needs to be kept alive.
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
return
}
if !used[n] {
used[n] = true
changed = true
}
return
case OpStore, OpMove, OpZero:
// v should be eliminated if we eliminate the auto.
n, ok := addr[args[0]]
if ok && elim[v] == nil {
elim[v] = n
changed = true
}
// Other args might hold pointers to autos.
args = args[1:]
}
// The code below assumes that we have handled all the ops
// with sym effects already. Sanity check that here.
// Ignore Args since they can't be autos.
if v.Op.SymEffect() != SymNone && v.Op != OpArg {
panic("unhandled op with sym effect")
}
if v.Uses == 0 || len(args) == 0 {
return
}
// If the address of the auto reaches a memory or control
// operation not covered above then we probably need to keep it.
if v.Type.IsMemory() || v.Type.IsFlags() || (v.Op != OpPhi && v.MemoryArg() != nil) {
for _, a := range args {
if n, ok := addr[a]; ok {
if !used[n] {
used[n] = true
changed = true
}
}
}
return
}
// Propagate any auto addresses through v.
node := GCNode(nil)
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if node == nil {
node = n
} else if node != n {
// Most of the time we only see one pointer
// reaching an op, but some ops can take
// multiple pointers (e.g. NeqPtr, Phi etc.).
// This is rare, so just propagate the first
// value to keep things simple.
used[n] = true
changed = true
}
}
}
if node == nil {
return
}
if addr[v] == nil {
// The address of an auto reaches this op.
addr[v] = node
changed = true
return
}
if addr[v] != node {
// This doesn't happen in practice, but catch it just in case.
used[node] = true
changed = true
}
return
}
iterations := 0
for {
if iterations == 4 {
// give up
return
}
iterations++
changed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
changed = visit(v) || changed
}
}
if !changed {
break
}
}
// Eliminate stores to unread autos.
for v, n := range elim {
if used[n] {
continue
}
// replace with OpCopy
v.SetArgs1(v.MemoryArg())
v.Aux = nil
v.AuxInt = 0
v.Op = OpCopy
}
}
// elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill)
// to autos that are never read from.
func elimUnreadAutos(f *Func) {


@@ -537,40 +537,162 @@
(NeqSlice x y) -> (NeqPtr (SlicePtr x) (SlicePtr y))
// Load of store of same address, with compatibly typed value and same size
(Load <t1> p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size() -> x
(Load <t1> p1 (Store {t2} p2 x _))
&& isSamePtr(p1, p2)
&& t1.Compare(x.Type) == types.CMPeq
&& t1.Size() == sizeof(t2)
-> x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
&& isSamePtr(p1, p3)
&& t1.Compare(x.Type) == types.CMPeq
&& t1.Size() == sizeof(t2)
&& disjoint(p3, sizeof(t3), p2, sizeof(t2))
-> x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
&& isSamePtr(p1, p4)
&& t1.Compare(x.Type) == types.CMPeq
&& t1.Size() == sizeof(t2)
&& disjoint(p4, sizeof(t4), p2, sizeof(t2))
&& disjoint(p4, sizeof(t4), p3, sizeof(t3))
-> x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
&& isSamePtr(p1, p5)
&& t1.Compare(x.Type) == types.CMPeq
&& t1.Size() == sizeof(t2)
&& disjoint(p5, sizeof(t5), p2, sizeof(t2))
&& disjoint(p5, sizeof(t5), p3, sizeof(t3))
&& disjoint(p5, sizeof(t5), p4, sizeof(t4))
-> x
// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1) -> (Const64F [x])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1) -> (Const32F [f2i(float64(math.Float32frombits(uint32(x))))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1) -> (Const64 [x])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))])
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [f2i(float64(math.Float32frombits(uint32(x))))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))])
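// For example (illustrative Go, assuming math.Float64bits compiles to a
// store of the float followed by an integer load of the same address):
//
//	func bits() uint64 { return math.Float64bits(1.0) } // folds to a constant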
// Float Loads up past intervening Stores to Zeros so they can be constant folded.
(Load <t1> op:(OffPtr [o1] p1)
(Store {t2} p2 _
mem:(Zero [n] p3 _)))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
&& disjoint(op, t1.Size(), p2, sizeof(t2))
-> @mem.Block (Load <t1> op mem)
(Load <t1> op:(OffPtr [o1] p1)
(Store {t2} p2 _
(Store {t3} p3 _
mem:(Zero [n] p4 _))))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
&& disjoint(op, t1.Size(), p2, sizeof(t2))
&& disjoint(op, t1.Size(), p3, sizeof(t3))
-> @mem.Block (Load <t1> op mem)
(Load <t1> op:(OffPtr [o1] p1)
(Store {t2} p2 _
(Store {t3} p3 _
(Store {t4} p4 _
mem:(Zero [n] p5 _)))))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
&& disjoint(op, t1.Size(), p2, sizeof(t2))
&& disjoint(op, t1.Size(), p3, sizeof(t3))
&& disjoint(op, t1.Size(), p4, sizeof(t4))
-> @mem.Block (Load <t1> op mem)
(Load <t1> op:(OffPtr [o1] p1)
(Store {t2} p2 _
(Store {t3} p3 _
(Store {t4} p4 _
(Store {t5} p5 _
mem:(Zero [n] p6 _))))))
&& o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
&& disjoint(op, t1.Size(), p2, sizeof(t2))
&& disjoint(op, t1.Size(), p3, sizeof(t3))
&& disjoint(op, t1.Size(), p4, sizeof(t4))
&& disjoint(op, t1.Size(), p5, sizeof(t5))
-> @mem.Block (Load <t1> op mem)
// Zero to Load forwarding.
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& t1.IsBoolean()
&& isSamePtr(p1, p2)
&& n >= o + 1
-> (ConstBool [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& is8BitInt(t1)
&& isSamePtr(p1, p2)
&& n >= o + 1
-> (Const8 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& is16BitInt(t1)
&& isSamePtr(p1, p2)
&& n >= o + 2
-> (Const16 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& is32BitInt(t1)
&& isSamePtr(p1, p2)
&& n >= o + 4
-> (Const32 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& is64BitInt(t1)
&& isSamePtr(p1, p2)
&& n >= o + 8
-> (Const64 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& is32BitFloat(t1)
&& isSamePtr(p1, p2)
&& n >= o + 4
-> (Const32F [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
&& is64BitFloat(t1)
&& isSamePtr(p1, p2)
&& n >= o + 8
-> (Const64F [0])
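// For example (illustrative Go, not from this CL), a load from a freshly
// zeroed struct folds to a zero constant:
//
//	type pair struct{ a, b int64 }
//
//	func f() int64 {
//		var p pair // Zero [16]
//		return p.b // Load at offset 8 -> (Const64 [0])
//	}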
// Eliminate stores of values that have just been loaded from the same location.
// We also handle the common case where there are some intermediate stores to non-overlapping struct fields.
(Store {t1} p1 (Load <t2> p2 mem) mem) &&
isSamePtr(p1, p2) &&
t2.Size() == t1.(*types.Type).Size() -> mem
(Store {t1} (OffPtr [o1] p1) (Load <t2> (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ oldmem)) &&
isSamePtr(p1, p2) &&
isSamePtr(p1, p3) &&
t2.Size() == t1.(*types.Type).Size() &&
!overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) -> mem
(Store {t1} (OffPtr [o1] p1) (Load <t2> (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ (Store {t4} (OffPtr [o4] p4) _ oldmem))) &&
isSamePtr(p1, p2) &&
isSamePtr(p1, p3) &&
isSamePtr(p1, p4) &&
t2.Size() == t1.(*types.Type).Size() &&
!overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) &&
!overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) -> mem
(Store {t1} (OffPtr [o1] p1) (Load <t2> (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ (Store {t4} (OffPtr [o4] p4) _ (Store {t5} (OffPtr [o5] p5) _ oldmem)))) &&
isSamePtr(p1, p2) &&
isSamePtr(p1, p3) &&
isSamePtr(p1, p4) &&
isSamePtr(p1, p5) &&
t2.Size() == t1.(*types.Type).Size() &&
!overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) &&
!overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) &&
!overlap(o1, t2.Size(), o5, t5.(*types.Type).Size()) -> mem
// We also handle the common case where there are some intermediate stores.
(Store {t1} p1 (Load <t2> p2 mem) mem)
&& isSamePtr(p1, p2)
&& t2.Size() == sizeof(t1)
-> mem
(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
&& isSamePtr(p1, p2)
&& t2.Size() == sizeof(t1)
&& disjoint(p1, sizeof(t1), p3, sizeof(t3))
-> mem
(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
&& isSamePtr(p1, p2)
&& t2.Size() == sizeof(t1)
&& disjoint(p1, sizeof(t1), p3, sizeof(t3))
&& disjoint(p1, sizeof(t1), p4, sizeof(t4))
-> mem
(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
&& isSamePtr(p1, p2)
&& t2.Size() == sizeof(t1)
&& disjoint(p1, sizeof(t1), p3, sizeof(t3))
&& disjoint(p1, sizeof(t1), p4, sizeof(t4))
&& disjoint(p1, sizeof(t1), p5, sizeof(t5))
-> mem
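// For example (illustrative Go): v is stored back to the slot it was just
// loaded from, with only a disjoint store in between, so the final store
// can be dropped:
//
//	func f(p *[2]int64) {
//		v := p[0]
//		p[1] = 7 // disjoint intermediate store
//		p[0] = v // eliminated
//	}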
// Don't Store zeros to cleared variables.
(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
&& isConstZero(x)
&& o >= 0 && sizeof(t) + o <= n && isSamePtr(p1, p2)
-> mem
(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
&& isConstZero(x)
&& o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p3)
&& disjoint(op, sizeof(t1), p2, sizeof(t2))
-> mem
(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
&& isConstZero(x)
&& o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p4)
&& disjoint(op, sizeof(t1), p2, sizeof(t2))
&& disjoint(op, sizeof(t1), p3, sizeof(t3))
-> mem
(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
&& isConstZero(x)
&& o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p5)
&& disjoint(op, sizeof(t1), p2, sizeof(t2))
&& disjoint(op, sizeof(t1), p3, sizeof(t3))
&& disjoint(op, sizeof(t1), p4, sizeof(t4))
-> mem
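// For example (illustrative Go), an explicit zero store into memory that
// a wide Zero has already cleared is dropped:
//
//	func f() [3]int64 {
//		var a [3]int64 // Zero [24]
//		a[1] = 0       // store of a constant zero -> removed
//		return a
//	}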
// Collapse OffPtr
(OffPtr (OffPtr p [b]) [a]) -> (OffPtr p [a+b])
@@ -657,9 +779,9 @@
// un-SSAable values use mem->mem copies
(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(*types.Type)) ->
(Move {t} [t.(*types.Type).Size()] dst src mem)
(Move {t} [sizeof(t)] dst src mem)
(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(*types.Type)) ->
(Move {t} [t.(*types.Type).Size()] dst src (VarDef {x} mem))
(Move {t} [sizeof(t)] dst src (VarDef {x} mem))
// array ops
(ArraySelect (ArrayMake1 x)) -> x
@@ -1278,3 +1400,382 @@
// so this rule should trigger reliably.
(InterCall [argsize] (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, itab, off) != nil ->
(StaticCall [argsize] {devirt(v, itab, off)} mem)
// Move and Zero optimizations.
// Move source and destination may overlap.
// Convert Moves into Zeros when the source is known to be zeros.
(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2)
-> (Zero {t} [n] dst1 mem)
(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0)
-> (Zero {t} [n] dst1 mem)
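// For example (illustrative Go), copying from a just-zeroed source is
// rewritten as zeroing the destination:
//
//	func f(dst, src *[4]int64) {
//		*src = [4]int64{} // Zero src
//		*dst = *src       // Move -> Zero dst
//	}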
// Don't Store to variables that are about to be overwritten by Move/Zero.
(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
&& isSamePtr(p1, p2) && store.Uses == 1
&& n >= o2 + sizeof(t2)
&& clobber(store)
-> (Zero {t1} [n] p1 mem)
(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
&& isSamePtr(dst1, dst2) && store.Uses == 1
&& n >= o2 + sizeof(t2)
&& disjoint(src1, n, op, sizeof(t2))
&& clobber(store)
-> (Move {t1} [n] dst1 src1 mem)
// Don't Move to variables that are immediately completely overwritten.
(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
&& move.Uses == 1
&& isSamePtr(dst1, dst2)
&& clobber(move)
-> (Zero {t} [n] dst1 mem)
(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
&& move.Uses == 1
&& isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
&& clobber(move)
-> (Move {t} [n] dst1 src1 mem)
(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
&& move.Uses == 1 && vardef.Uses == 1
&& isSamePtr(dst1, dst2)
&& clobber(move) && clobber(vardef)
-> (Zero {t} [n] dst1 (VarDef {x} mem))
(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
&& move.Uses == 1 && vardef.Uses == 1
&& isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
&& clobber(move) && clobber(vardef)
-> (Move {t} [n] dst1 src1 (VarDef {x} mem))
(Store {t1} op1:(OffPtr [o1] p1) d1
m2:(Store {t2} op2:(OffPtr [0] p2) d2
m3:(Move [n] p3 _ mem)))
&& m2.Uses == 1 && m3.Uses == 1
&& o1 == sizeof(t2)
&& n == sizeof(t2) + sizeof(t1)
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& clobber(m2) && clobber(m3)
-> (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
(Store {t1} op1:(OffPtr [o1] p1) d1
m2:(Store {t2} op2:(OffPtr [o2] p2) d2
m3:(Store {t3} op3:(OffPtr [0] p3) d3
m4:(Move [n] p4 _ mem))))
&& m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
&& o2 == sizeof(t3)
&& o1-o2 == sizeof(t2)
&& n == sizeof(t3) + sizeof(t2) + sizeof(t1)
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& clobber(m2) && clobber(m3) && clobber(m4)
-> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
(Store {t1} op1:(OffPtr [o1] p1) d1
m2:(Store {t2} op2:(OffPtr [o2] p2) d2
m3:(Store {t3} op3:(OffPtr [o3] p3) d3
m4:(Store {t4} op4:(OffPtr [0] p4) d4
m5:(Move [n] p5 _ mem)))))
&& m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& o1-o2 == sizeof(t2)
&& n == sizeof(t4) + sizeof(t3) + sizeof(t2) + sizeof(t1)
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5)
-> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
// Don't Zero variables that are immediately completely overwritten
// before being accessed.
(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
&& zero.Uses == 1
&& isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
&& clobber(zero)
-> (Move {t} [n] dst1 src1 mem)
(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
&& zero.Uses == 1 && vardef.Uses == 1
&& isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
&& clobber(zero) && clobber(vardef)
-> (Move {t} [n] dst1 src1 (VarDef {x} mem))
(Store {t1} op1:(OffPtr [o1] p1) d1
m2:(Store {t2} op2:(OffPtr [0] p2) d2
m3:(Zero [n] p3 mem)))
&& m2.Uses == 1 && m3.Uses == 1
&& o1 == sizeof(t2)
&& n == sizeof(t2) + sizeof(t1)
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& clobber(m2) && clobber(m3)
-> (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
(Store {t1} op1:(OffPtr [o1] p1) d1
m2:(Store {t2} op2:(OffPtr [o2] p2) d2
m3:(Store {t3} op3:(OffPtr [0] p3) d3
m4:(Zero [n] p4 mem))))
&& m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
&& o2 == sizeof(t3)
&& o1-o2 == sizeof(t2)
&& n == sizeof(t3) + sizeof(t2) + sizeof(t1)
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& clobber(m2) && clobber(m3) && clobber(m4)
-> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
(Store {t1} op1:(OffPtr [o1] p1) d1
m2:(Store {t2} op2:(OffPtr [o2] p2) d2
m3:(Store {t3} op3:(OffPtr [o3] p3) d3
m4:(Store {t4} op4:(OffPtr [0] p4) d4
m5:(Zero [n] p5 mem)))))
&& m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& o1-o2 == sizeof(t2)
&& n == sizeof(t4) + sizeof(t3) + sizeof(t2) + sizeof(t1)
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5)
-> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
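// For example (illustrative Go), the implicit Zero of d is removed
// because the subsequent Move overwrites all of it:
//
//	func f(src *[4]int64) [4]int64 {
//		var d [4]int64 // VarDef+Zero of d: the Zero is removed
//		d = *src       // this Move fully overwrites d
//		return d
//	}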
// Don't Move from memory if the values are likely to already be
// in registers.
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [0] p3) d2 _)))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& o2 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [0] p4) d3 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [o4] p4) d3
(Store {t5} op5:(OffPtr [0] p5) d4 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& alignof(t5) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& registerizable(b, t5)
&& o4 == sizeof(t5)
&& o3-o4 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3
(Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
// Same thing but with VarDef in the middle.
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [0] p3) d2 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& o2 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [0] p4) d3 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr [o2] p2) d1
(Store {t3} op3:(OffPtr [o3] p3) d2
(Store {t4} op4:(OffPtr [o4] p4) d3
(Store {t5} op5:(OffPtr [0] p5) d4 _))))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& alignof(t5) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& registerizable(b, t5)
&& o4 == sizeof(t5)
&& o3-o4 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
-> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
(Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
(Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3
(Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
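// For example (illustrative Go), the fields of p were just stored from
// registers, so the copy re-stores the values instead of doing a
// memory-to-memory Move:
//
//	type point struct{ x, y int64 }
//
//	func f(dst *point, a, b int64) {
//		p := point{b, a} // two register-sized stores
//		*dst = p         // Move -> two Stores of b and a
//	}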
// Prefer to Zero and Store rather than to Move.
(Move {t1} [n] dst p1
mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Zero {t3} [n] p3 _)))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& registerizable(b, t2)
&& n >= o2 + sizeof(t2)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Zero {t1} [n] dst mem))
(Move {t1} [n] dst p1
mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
(Store {t3} (OffPtr <tt3> [o3] p3) d2
(Zero {t4} [n] p4 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& n >= o2 + sizeof(t2)
&& n >= o3 + sizeof(t3)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Zero {t1} [n] dst mem)))
(Move {t1} [n] dst p1
mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
(Store {t3} (OffPtr <tt3> [o3] p3) d2
(Store {t4} (OffPtr <tt4> [o4] p4) d3
(Zero {t5} [n] p5 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& alignof(t5) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& n >= o2 + sizeof(t2)
&& n >= o3 + sizeof(t3)
&& n >= o4 + sizeof(t4)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [o4] dst) d3
(Zero {t1} [n] dst mem))))
(Move {t1} [n] dst p1
mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
(Store {t3} (OffPtr <tt3> [o3] p3) d2
(Store {t4} (OffPtr <tt4> [o4] p4) d3
(Store {t5} (OffPtr <tt5> [o5] p5) d4
(Zero {t6} [n] p6 _))))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& alignof(t5) <= alignof(t1)
&& alignof(t6) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& registerizable(b, t5)
&& n >= o2 + sizeof(t2)
&& n >= o3 + sizeof(t3)
&& n >= o4 + sizeof(t4)
&& n >= o5 + sizeof(t5)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [o4] dst) d3
(Store {t5} (OffPtr <tt5> [o5] dst) d4
(Zero {t1} [n] dst mem)))))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
(Zero {t3} [n] p3 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& registerizable(b, t2)
&& n >= o2 + sizeof(t2)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Zero {t1} [n] dst mem))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} (OffPtr <tt2> [o2] p2) d1
(Store {t3} (OffPtr <tt3> [o3] p3) d2
(Zero {t4} [n] p4 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& n >= o2 + sizeof(t2)
&& n >= o3 + sizeof(t3)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Zero {t1} [n] dst mem)))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} (OffPtr <tt2> [o2] p2) d1
(Store {t3} (OffPtr <tt3> [o3] p3) d2
(Store {t4} (OffPtr <tt4> [o4] p4) d3
(Zero {t5} [n] p5 _))))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& alignof(t5) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& n >= o2 + sizeof(t2)
&& n >= o3 + sizeof(t3)
&& n >= o4 + sizeof(t4)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [o4] dst) d3
(Zero {t1} [n] dst mem))))
(Move {t1} [n] dst p1
mem:(VarDef
(Store {t2} (OffPtr <tt2> [o2] p2) d1
(Store {t3} (OffPtr <tt3> [o3] p3) d2
(Store {t4} (OffPtr <tt4> [o4] p4) d3
(Store {t5} (OffPtr <tt5> [o5] p5) d4
(Zero {t6} [n] p6 _)))))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
&& alignof(t4) <= alignof(t1)
&& alignof(t5) <= alignof(t1)
&& alignof(t6) <= alignof(t1)
&& registerizable(b, t2)
&& registerizable(b, t3)
&& registerizable(b, t4)
&& registerizable(b, t5)
&& n >= o2 + sizeof(t2)
&& n >= o3 + sizeof(t3)
&& n >= o4 + sizeof(t4)
&& n >= o5 + sizeof(t5)
-> (Store {t2} (OffPtr <tt2> [o2] dst) d1
(Store {t3} (OffPtr <tt3> [o3] dst) d2
(Store {t4} (OffPtr <tt4> [o4] dst) d3
(Store {t5} (OffPtr <tt5> [o5] dst) d4
(Zero {t1} [n] dst mem)))))
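// For example (illustrative Go), a composite that is mostly zero is built
// by zeroing the destination and storing the single non-zero element,
// rather than copying a stack temporary:
//
//	func f(i int) [4]int {
//		return [4]int{2: i} // Zero the result, then store i at offset 16
//	}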


@@ -456,6 +456,50 @@ func isSamePtr(p1, p2 *Value) bool {
return false
}
// disjoint reports whether the memory region specified by [p1:p1+n1)
// does not overlap with [p2:p2+n2).
// A return value of false does not imply the regions overlap.
func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
if n1 == 0 || n2 == 0 {
return true
}
if p1 == p2 {
return false
}
baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
base, offset = ptr, 0
if base.Op == OpOffPtr {
offset += base.AuxInt
base = base.Args[0]
}
return base, offset
}
p1, off1 := baseAndOffset(p1)
p2, off2 := baseAndOffset(p2)
if isSamePtr(p1, p2) {
return !overlap(off1, n1, off2, n2)
}
// p1 and p2 are not the same, so if they are both OpAddrs then
// they point to different variables.
// If one pointer is on the stack and the other is an argument
// then they can't overlap.
switch p1.Op {
case OpAddr:
if p2.Op == OpAddr || p2.Op == OpSP {
return true
}
return p2.Op == OpArg && p1.Args[0].Op == OpSP
case OpArg:
if p2.Op == OpSP {
return true
}
return p2.Op == OpAddr && p2.Args[0].Op == OpSP
case OpSP:
return p2.Op == OpAddr || p2.Op == OpArg || p2.Op == OpSP
}
return false
}
// moveSize returns the number of bytes an aligned MOV instruction moves
func moveSize(align int64, c *Config) int64 {
switch {
@@ -879,3 +923,30 @@ func arm64BFWidth(mask, rshift int64) int64 {
}
return nto(shiftedMask)
}
// sizeof returns the size of t in bytes.
// It will panic if t is not a *types.Type.
func sizeof(t interface{}) int64 {
return t.(*types.Type).Size()
}
// alignof returns the alignment of t in bytes.
// It will panic if t is not a *types.Type.
func alignof(t interface{}) int64 {
return t.(*types.Type).Alignment()
}
// registerizable reports whether t is a primitive type that fits in
// a register. It assumes float64 values will always fit into registers
// even if that isn't strictly true.
// It will panic if t is not a *types.Type.
func registerizable(b *Block, t interface{}) bool {
typ := t.(*types.Type)
if typ.IsPtrShaped() || typ.IsFloat() {
return true
}
if typ.IsInteger() {
return typ.Size() <= b.Func.Config.RegSize
}
return false
}

File diff suppressed because it is too large.


@@ -93,8 +93,10 @@ func TestUnaligned64(t *testing.T) {
}
x := make([]uint32, 4)
up64 := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
p64 := (*int64)(unsafe.Pointer(&x[1])) // misaligned
u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4
up64 := (*uint64)(u) // misaligned
p64 := (*int64)(u) // misaligned
shouldPanic(t, "Load64", func() { atomic.Load64(up64) })
shouldPanic(t, "Loadint64", func() { atomic.Loadint64(p64) })


@@ -11,22 +11,81 @@ import "runtime"
// This file contains code generation tests related to the use of the
// stack.
// check that stack stores are optimized away
// Check that stack stores are optimized away.
// 386:"TEXT\t.*, [$]0-4"
// amd64:"TEXT\t.*, [$]0-8"
// arm:"TEXT\t.*, [$]-4-4"
// arm64:"TEXT\t.*, [$]-8-8"
// s390x:"TEXT\t.*, [$]0-8"
// ppc64le:"TEXT\t.*, [$]0-8"
// mips:"TEXT\t.*, [$]-4-4"
// 386:"TEXT\t.*, [$]0-"
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]-4-"
// arm64:"TEXT\t.*, [$]-8-"
// mips:"TEXT\t.*, [$]-4-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func StackStore() int {
var x int
return *(&x)
}
type T struct {
A, B, C, D int // keep exported fields
x, y, z int // reset unexported fields
}
// Check that large structs are cleared directly (issue #24416).
// 386:"TEXT\t.*, [$]0-"
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]-8-"
// mips:"TEXT\t.*, [$]-4-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ZeroLargeStruct(x *T) {
t := T{}
*x = t
}
// Check that structs are partially initialised directly (issue #24386).
// Notes:
// - 386 fails due to spilling a register
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]-8-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
// Note that 386 currently has to spill a register.
func KeepWanted(t *T) {
*t = T{A: t.A, B: t.B, C: t.C, D: t.D}
}
// Check that small array operations avoid using the stack (issue #15925).
// Notes:
// - 386 fails due to spilling a register
// - arm & mips fail due to softfloat calls
// amd64:"TEXT\t.*, [$]0-"
// arm64:"TEXT\t.*, [$]-8-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ArrayAdd64(a, b [4]float64) [4]float64 {
return [4]float64{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]}
}
// Check that small array initialization avoids using the stack.
// 386:"TEXT\t.*, [$]0-"
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]-8-"
// mips:"TEXT\t.*, [$]-4-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ArrayInit(i, j int) [4]int {
return [4]int{i, 0, j, 0}
}
// Check that assembly output has matching offset and base register
// (Issue #21064).
// (issue #21064).
// amd64:`.*b\+24\(SP\)`
// arm:`.*b\+4\(FP\)`


@@ -13,6 +13,9 @@
package p
import "runtime"
func f() { // ERROR "stack frame too large"
_ = [][]int{1e9: []int{}}
x := [][]int{1e9: []int{}}
runtime.KeepAlive(x)
}