
cmd/compile: optimize shift pairs and masks on s390x

Optimize combinations of left and right shifts by a constant value
into a 'rotate then insert selected bits [into zero]' instruction.
Use the same instruction for contiguous masks since it has some
benefits over 'and immediate' (not restricted to 32 bits, does not
overwrite the source register).
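
A minimal Go sketch of the instruction's semantics (the risbgz helper
is illustrative, not part of this change): rotate the source left,
then keep only the big-endian bit range [start, end] (bit 0 is the
most significant bit; the range may wrap), zeroing everything else.
The mask construction mirrors the OutMask method added to rotate.go
below.

  package main

  import (
  	"fmt"
  	"math/bits"
  )

  // risbgz emulates 'rotate then insert selected bits [into zero]'.
  func risbgz(src uint64, start, end, amount uint) uint64 {
  	rotated := bits.RotateLeft64(src, int(amount))
  	z := (63 - end + start) & 63 // number of bits outside the selection
  	mask := bits.RotateLeft64(^uint64(0)<<z, -int(start))
  	return rotated & mask
  }

  func main() {
  	x := uint64(0x0123456789abcdef)
  	fmt.Printf("%#016x\n", risbgz(x, 16, 47, 0)) // x & 0x0000ffffffff0000
  	fmt.Printf("%#016x\n", risbgz(x, 0, 63, 7))  // x<<7 | x>>57
  }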

To keep the complexity of this change under control I've only
implemented 64-bit operations for now.

There are a lot more optimizations that can be done with this
instruction family. However, since their function overlaps with other
instructions, we need to be somewhat careful not to break existing
optimization rules by creating optimization dead ends. This is
particularly true of the load/store merging rules which contain lots
of zero extensions and shifts.

This CL does interfere with the store merging rules when an operand
is shifted left before it is stored:

  binary.BigEndian.PutUint64(b, x << 1)

This is unfortunate, but it's not critical and is somewhat complex,
so I plan to fix it in a follow-up CL.
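
For reference, a self-contained instance of the affected pattern
(the function name is illustrative):

  package main

  import "encoding/binary"

  // putShifted stores a left-shifted operand. The shift feeding the
  // merged byte stores is the case this change currently pessimizes.
  func putShifted(b []byte, x uint64) {
  	binary.BigEndian.PutUint64(b, x<<1)
  }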

file      before    after     Δ       %
addr2line 4117446   4117282   -164    -0.004%
api       4945184   4942752   -2432   -0.049%
asm       4998079   4991891   -6188   -0.124%
buildid   2685158   2684074   -1084   -0.040%
cgo       4553732   4553394   -338    -0.007%
compile   19294446  19245070  -49376  -0.256%
cover     4897105   4891319   -5786   -0.118%
dist      3544389   3542785   -1604   -0.045%
doc       3926795   3927617   +822    +0.021%
fix       3302958   3293868   -9090   -0.275%
link      6546274   6543456   -2818   -0.043%
nm        4102021   4100825   -1196   -0.029%
objdump   4542431   4548483   +6052   +0.133%
pack      2482465   2416389   -66076  -2.662%
pprof     13366541  13363915  -2626   -0.020%
test2json 2829007   2761515   -67492  -2.386%
trace     10216164  10219684  +3520   +0.034%
vet       6773956   6773572   -384    -0.006%
total     107124151 106917891 -206260 -0.193%

Change-Id: I7591cce41e06867ba10a745daae9333513062746
Reviewed-on: https://go-review.googlesource.com/c/go/+/233317
Run-TryBot: Michael Munday <mike.munday@ibm.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Trust: Michael Munday <mike.munday@ibm.com>
Author: Michael Munday, 2020-05-11 09:44:48 -07:00
Parent: b7e0adfee2
Commit: 854e892ce1

12 changed files with 1012 additions and 338 deletions

src/cmd/compile/internal/s390x/ssa.go

@@ -188,6 +188,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
{Type: obj.TYPE_REG, Reg: r2},
})
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
case ssa.OpS390XRISBGZ:
r1 := v.Reg()
r2 := v.Args[0].Reg()
i := v.Aux.(s390x.RotateParams)
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
p.SetRestArgs([]obj.Addr{
{Type: obj.TYPE_CONST, Offset: int64(i.End)},
{Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
{Type: obj.TYPE_REG, Reg: r2},
})
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
case ssa.OpS390XADD, ssa.OpS390XADDW,
ssa.OpS390XSUB, ssa.OpS390XSUBW,
ssa.OpS390XAND, ssa.OpS390XANDW,
@@ -360,7 +372,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
ssa.OpS390XRLLconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt

src/cmd/compile/internal/ssa/gen/S390X.rules

@@ -643,8 +643,18 @@
// equivalent to the leftmost 32 bits being set.
// TODO(mundaym): modify the assembler to accept 64-bit values
// and use isU32Bit(^c).
(AND x (MOVDconst [c])) && is32Bit(c) && c < 0 => (ANDconst [c] x)
(AND x (MOVDconst [c])) && is32Bit(c) && c >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
(AND x (MOVDconst [c]))
&& s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
=> (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
(AND x (MOVDconst [c]))
&& is32Bit(c)
&& c < 0
=> (ANDconst [c] x)
(AND x (MOVDconst [c]))
&& is32Bit(c)
&& c >= 0
=> (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)
((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)
@@ -653,14 +663,20 @@
((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)
// Constant shifts.
(S(LD|RD|RAD|LW|RW|RAW) x (MOVDconst [c]))
=> (S(LD|RD|RAD|LW|RW|RAW)const x [int8(c&63)])
(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [int8(c&63)])
(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [int8(c&31)])
(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])
// Shifts only use the rightmost 6 bits of the shift value.
(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
&& r.Amount == 0
&& r.OutMask()&63 == 63
=> (S(LD|RD|RAD|LW|RW|RAW) x y)
(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
=> (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
=> (S(LD|RD|RAD|LW|RW|RAW) x y)
(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y)
(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y)
(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
@@ -668,17 +684,13 @@
(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y)
(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)
// Constant rotate generation
(RLL x (MOVDconst [c])) => (RLLconst x [int8(c&31)])
(RLLG x (MOVDconst [c])) => (RLLGconst x [int8(c&63)])
// Match rotate by constant.
(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, int8(c&63))})
(RLL x (MOVDconst [c])) => (RLLconst x [int8(c&31)])
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (RLLGconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (RLLGconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (RLLGconst [c] x)
(ADDW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (RLLconst [c] x)
( ORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (RLLconst [c] x)
(XORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (RLLconst [c] x)
// Match rotate by constant pattern.
((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
((ADD|OR|XOR)W (SLWconst x [c]) (SRWconst x [32-c])) => (RLLconst x [c])
// Signed 64-bit comparison with immediate.
(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
@@ -692,15 +704,97 @@
(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))
// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(max8(0, c-d), 63-d, (d-c)&63)})
// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, min8(63, 63-c+d), (c-d)&63)})
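
As a sanity check of the (x >> c) << d mapping above, the following
standalone Go snippet (not part of the rules file) verifies one
instance against its rotate-and-mask equivalent:

  package main

  import (
  	"fmt"
  	"math/bits"
  )

  func main() {
  	x := uint64(0x0123456789abcdef)
  	const c, d = 25, 3 // (x >> 25) << 3; here c > d, so start = c-d
  	start, end := c-d, 63-d
  	z := uint(63-end+start) & 63 // zero bits in the selection mask
  	mask := bits.RotateLeft64(^uint64(0)<<z, -start)
  	got := bits.RotateLeft64(x, (d-c)&63) & mask
  	fmt.Println(got == (x>>c)<<d) // true
  }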
// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})
// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})
// Absorb shift into 'rotate then insert selected bits [into zero]'.
//
// Any unsigned shift can be represented as a rotate and mask operation:
//
// x << c => RotateLeft64(x, c) & (^uint64(0) << c)
// x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
//
// Therefore when a shift is used as the input to a rotate then insert
// selected bits instruction we can merge the two together. We just have
// to be careful that the resultant mask is representable (non-zero and
// contiguous). For example, assuming that x is variable and c, y and m
// are constants, a shift followed by a rotate then insert selected bits
// could be represented as:
//
// RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
//
// We can split the rotation by y into two, one rotate for x and one for
// the mask:
//
// RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
//
// The rotations of x by c followed by y can then be combined:
//
// RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
// ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// rotate mask
//
// To perform this optimization we therefore just need to check that it
// is valid to merge the shift mask (^(uint64(0)<<c)) into the selected
// bits mask (i.e. that the resultant mask is non-zero and contiguous).
//
(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
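
The identities in the comment above are easy to check exhaustively
with a standalone snippet (not part of the rules file):

  package main

  import (
  	"fmt"
  	"math/bits"
  )

  func main() {
  	x, ok := uint64(0x0123456789abcdef), true
  	for c := 0; c < 64; c++ {
  		ok = ok && x<<c == bits.RotateLeft64(x, c)&(^uint64(0)<<c)
  		ok = ok && x>>c == bits.RotateLeft64(x, -c)&(^uint64(0)>>c)
  	}
  	fmt.Println(ok) // true
  }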
// Absorb 'rotate then insert selected bits [into zero]' into left shift.
(SLDconst (RISBGZ x {r}) [c])
&& s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
=> (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
// Absorb 'rotate then insert selected bits [into zero]' into right shift.
(SRDconst (RISBGZ x {r}) [c])
&& s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
=> (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
// Merge 'rotate then insert selected bits [into zero]' instructions together.
(RISBGZ (RISBGZ x {y}) {z})
&& z.InMerge(y.OutMask()) != nil
=> (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
// Convert RISBGZ into 64-bit shift (helps CSE).
(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])
// Optimize single bit isolation when it is known to be equivalent to
// the most significant bit due to mask produced by arithmetic shift.
// Simply isolate the most significant bit itself and place it in the
// correct position.
//
// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
(RISBGZ (SRADconst x [c]) {r})
&& r.Start == r.End // single bit selected
&& (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
=> (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
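
A standalone check of the example above (not part of the rules file):
selecting big-endian bit 60 after rotating left by 4 places the most
significant bit at 1<<3, matching the shift-and-mask form.

  package main

  import (
  	"fmt"
  	"math/bits"
  )

  func main() {
  	for _, x := range []uint64{0, 1, 1 << 62, 1 << 63, ^uint64(0)} {
  		want := uint64(int64(x)>>63) & 0x8
  		got := bits.RotateLeft64(x, 4) & (1 << 3)
  		fmt.Println(want == got) // true for every x
  	}
  }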
// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// Using MOV{W,H,B}Zreg instead of AND is cheaper.
(AND x (MOVDconst [0xFF])) => (MOVBZreg x)
(AND x (MOVDconst [0xFFFF])) => (MOVHZreg x)
(AND x (MOVDconst [0xFFFFFFFF])) => (MOVWZreg x)
(ANDWconst [0xFF] x) => (MOVBZreg x)
(ANDWconst [0xFFFF] x) => (MOVHZreg x)
// Use sign/zero extend instead of RISBGZ.
(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)
// Use sign/zero extend instead of ANDW.
(ANDWconst [0x00ff] x) => (MOVBZreg x)
(ANDWconst [0xffff] x) => (MOVHZreg x)
// Strength reduce multiplication to the sum (or difference) of two powers of two.
//
@@ -773,21 +867,22 @@
// detect attempts to set/clear the sign bit
// may need to be reworked when NIHH/OIHH are added
(SRDconst [1] (SLDconst [1] (LGDR <t> x))) => (LGDR <t> (LPDFR <x.Type> x))
(LDGR <t> (SRDconst [1] (SLDconst [1] x))) => (LPDFR (LDGR <t> x))
(AND (MOVDconst [^(-1<<63)]) (LGDR <t> x)) => (LGDR <t> (LPDFR <x.Type> x))
(LDGR <t> (AND (MOVDconst [^(-1<<63)]) x)) => (LPDFR (LDGR <t> x))
(RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
(LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
(OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
(LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
// detect attempts to set the sign bit with load
(LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
// detect copysign
(OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR <t> y))) => (LGDR (CPSDR <t> y x))
(OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) && c & -1<<63 == 0 => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
(OR (AND (MOVDconst [-1<<63]) (LGDR x)) (LGDR (LPDFR <t> y))) => (LGDR (CPSDR <t> y x))
(OR (AND (MOVDconst [-1<<63]) (LGDR x)) (MOVDconst [c])) && c & -1<<63 == 0 => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
&& r == s390x.NewRotateParams(0, 0, 0)
=> (LGDR (CPSDR <t> y x))
(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
&& c >= 0
&& r == s390x.NewRotateParams(0, 0, 0)
=> (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y)
@@ -966,6 +1061,9 @@
(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT)
(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)
(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)
// Constant compare-and-branch with immediate.
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no)

src/cmd/compile/internal/ssa/gen/S390XOps.go

@@ -331,25 +331,26 @@ func init() {
{name: "LTEBR", argLength: 1, reg: fp1flags, asm: "LTEBR", typ: "Flags"}, // arg0 compare to 0, f32
{name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32
{name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64
{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int8"}, // arg0 << auxint, shift amount 0-63
{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int8"}, // arg0 << auxint, shift amount 0-31
{name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 32
{name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64
{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int8"}, // unsigned arg0 >> auxint, shift amount 0-63
{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
// Arithmetic shifts clobber flags.
{name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 32
{name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
{name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
{name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
{name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-63
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-31
// Rotate instructions.
// Note: no RLLGconst - use RISBGZ instead.
{name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
{name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
{name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-31
// Rotate then (and|or|xor|insert) selected bits instructions.
//
@@ -371,6 +372,7 @@ func init() {
// +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
//
{name: "RXSBG", argLength: 2, reg: gp21, asm: "RXSBG", resultInArg0: true, aux: "S390XRotateParams", clobberFlags: true}, // rotate then xor selected bits
{name: "RISBGZ", argLength: 1, reg: gp11, asm: "RISBGZ", aux: "S390XRotateParams", clobberFlags: true}, // rotate then insert selected bits [into zero]
// unary ops
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0
@@ -547,9 +549,9 @@ func init() {
// Atomic bitwise operations.
// Note: 'floor' operations round the pointer down to the nearest word boundary
// which reflects how they are used in the runtime.
{name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
{name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
{name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem.
{name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
{name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
{name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
// Compare and swap.

src/cmd/compile/internal/ssa/opGen.go

@@ -2285,9 +2285,9 @@ const (
OpS390XSRAWconst
OpS390XRLLG
OpS390XRLL
OpS390XRLLGconst
OpS390XRLLconst
OpS390XRXSBG
OpS390XRISBGZ
OpS390XNEG
OpS390XNEGW
OpS390XNOT
@@ -30739,20 +30739,6 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "RLLGconst",
auxType: auxInt8,
argLen: 1,
asm: s390x.ARLLG,
reg: regInfo{
inputs: []inputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
outputs: []outputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
},
},
{
name: "RLLconst",
auxType: auxInt8,
@@ -30784,6 +30770,21 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "RISBGZ",
auxType: auxS390XRotateParams,
argLen: 1,
clobberFlags: true,
asm: s390x.ARISBGZ,
reg: regInfo{
inputs: []inputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
outputs: []outputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
},
},
{
name: "NEG",
argLen: 1,

src/cmd/compile/internal/ssa/rewriteS390X.go (diff suppressed because it is too large)

src/cmd/internal/obj/s390x/rotate.go

@@ -4,6 +4,10 @@
package s390x
import (
"math/bits"
)
// RotateParams represents the immediates required for a "rotate
// then ... selected bits instruction".
//
@@ -24,12 +28,18 @@ package s390x
// input left by. Note that this rotation is performed
// before the masked region is used.
type RotateParams struct {
Start uint8 // big-endian start bit index [0..63]
End uint8 // big-endian end bit index [0..63]
Amount uint8 // amount to rotate left
Start int8 // big-endian start bit index [0..63]
End int8 // big-endian end bit index [0..63]
Amount int8 // amount to rotate left
}
func NewRotateParams(start, end, amount int64) RotateParams {
// NewRotateParams creates a set of parameters representing a
// rotation left by the amount provided and a selection of the bits
// between the provided start and end indexes (inclusive).
//
// The start and end indexes and the rotation amount must all
// be in the range 0-63 inclusive or this function will panic.
func NewRotateParams(start, end, amount int8) RotateParams {
if start&^63 != 0 {
panic("start out of bounds")
}
@@ -40,8 +50,66 @@ func NewRotateParams(start, end, amount int64) RotateParams {
panic("amount out of bounds")
}
return RotateParams{
Start: uint8(start),
End: uint8(end),
Amount: uint8(amount),
Start: start,
End: end,
Amount: amount,
}
}
// RotateLeft generates a new set of parameters with the rotation amount
// increased by the given value. The selected bits are left unchanged.
func (r RotateParams) RotateLeft(amount int8) RotateParams {
r.Amount += amount
r.Amount &= 63
return r
}
// OutMask provides a mask representing the selected bits.
func (r RotateParams) OutMask() uint64 {
// Note: z must be unsigned for bootstrap compiler
z := uint8(63-r.End+r.Start) & 63 // number of zero bits in mask
return bits.RotateLeft64(^uint64(0)<<z, -int(r.Start))
}
// InMask provides a mask representing the selected bits relative
// to the source value (i.e. pre-rotation).
func (r RotateParams) InMask() uint64 {
return bits.RotateLeft64(r.OutMask(), -int(r.Amount))
}
// OutMerge tries to generate a new set of parameters representing
// the intersection between the selected bits and the provided mask.
// If the intersection is unrepresentable (0 or not contiguous) nil
// will be returned.
func (r RotateParams) OutMerge(mask uint64) *RotateParams {
mask &= r.OutMask()
if mask == 0 {
return nil
}
// normalize the mask so that the set bits are left aligned
o := bits.LeadingZeros64(^mask)
mask = bits.RotateLeft64(mask, o)
z := bits.LeadingZeros64(mask)
mask = bits.RotateLeft64(mask, z)
// check that the normalized mask is contiguous
l := bits.LeadingZeros64(^mask)
if l+bits.TrailingZeros64(mask) != 64 {
return nil
}
// update start and end positions (rotation amount remains the same)
r.Start = int8(o+z) & 63
r.End = (r.Start + int8(l) - 1) & 63
return &r
}
// InMerge tries to generate a new set of parameters representing
// the intersection between the selected bits and the provided mask
// as applied to the source value (i.e. pre-rotation).
// If the intersection is unrepresentable (0 or not contiguous) nil
// will be returned.
func (r RotateParams) InMerge(mask uint64) *RotateParams {
return r.OutMerge(bits.RotateLeft64(mask, int(r.Amount)))
}
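
A usage sketch for the new helpers (a hypothetical example test in
the same package); the expected output matches the wrap-around case
in the test table below:

  package s390x

  import "fmt"

  func ExampleRotateParams() {
  	// Rotate left 16, then select big-endian bits 48..15 (wrapping).
  	r := NewRotateParams(48, 15, 16)
  	fmt.Printf("%#x\n", r.OutMask())
  	fmt.Printf("%#x\n", r.InMask())
  	// Output:
  	// 0xffff00000000ffff
  	// 0xffffffff00000000
  }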

src/cmd/internal/obj/s390x/rotate_test.go

@@ -0,0 +1,122 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s390x
import (
"testing"
)
func TestRotateParamsMask(t *testing.T) {
tests := []struct {
start, end, amount int8
inMask, outMask uint64
}{
// start before end, no rotation
{start: 0, end: 63, amount: 0, inMask: ^uint64(0), outMask: ^uint64(0)},
{start: 1, end: 63, amount: 0, inMask: ^uint64(0) >> 1, outMask: ^uint64(0) >> 1},
{start: 0, end: 62, amount: 0, inMask: ^uint64(1), outMask: ^uint64(1)},
{start: 1, end: 62, amount: 0, inMask: ^uint64(3) >> 1, outMask: ^uint64(3) >> 1},
// end before start, no rotation
{start: 63, end: 0, amount: 0, inMask: 1<<63 | 1, outMask: 1<<63 | 1},
{start: 62, end: 0, amount: 0, inMask: 1<<63 | 3, outMask: 1<<63 | 3},
{start: 63, end: 1, amount: 0, inMask: 3<<62 | 1, outMask: 3<<62 | 1},
{start: 62, end: 1, amount: 0, inMask: 3<<62 | 3, outMask: 3<<62 | 3},
// rotation
{start: 32, end: 63, amount: 32, inMask: 0xffffffff00000000, outMask: 0x00000000ffffffff},
{start: 48, end: 15, amount: 16, inMask: 0xffffffff00000000, outMask: 0xffff00000000ffff},
{start: 0, end: 7, amount: -8 & 63, inMask: 0xff, outMask: 0xff << 56},
}
for i, test := range tests {
r := NewRotateParams(test.start, test.end, test.amount)
if m := r.OutMask(); m != test.outMask {
t.Errorf("out mask %v: want %#x, got %#x", i, test.outMask, m)
}
if m := r.InMask(); m != test.inMask {
t.Errorf("in mask %v: want %#x, got %#x", i, test.inMask, m)
}
}
}
func TestRotateParamsMerge(t *testing.T) {
tests := []struct {
// inputs
src RotateParams
mask uint64
// results
in *RotateParams
out *RotateParams
}{
{
src: RotateParams{Start: 48, End: 15, Amount: 16},
mask: 0xffffffffffffffff,
in: &RotateParams{Start: 48, End: 15, Amount: 16},
out: &RotateParams{Start: 48, End: 15, Amount: 16},
},
{
src: RotateParams{Start: 16, End: 47, Amount: 0},
mask: 0x00000000ffffffff,
in: &RotateParams{Start: 32, End: 47, Amount: 0},
out: &RotateParams{Start: 32, End: 47, Amount: 0},
},
{
src: RotateParams{Start: 16, End: 47, Amount: 0},
mask: 0xffff00000000ffff,
in: nil,
out: nil,
},
{
src: RotateParams{Start: 0, End: 63, Amount: 0},
mask: 0xf7f0000000000000,
in: nil,
out: nil,
},
{
src: RotateParams{Start: 0, End: 63, Amount: 1},
mask: 0x000000000000ff00,
in: &RotateParams{Start: 47, End: 54, Amount: 1},
out: &RotateParams{Start: 48, End: 55, Amount: 1},
},
{
src: RotateParams{Start: 32, End: 63, Amount: 32},
mask: 0xffff00000000ffff,
in: &RotateParams{Start: 32, End: 47, Amount: 32},
out: &RotateParams{Start: 48, End: 63, Amount: 32},
},
{
src: RotateParams{Start: 0, End: 31, Amount: 32},
mask: 0x8000000000000000,
in: nil,
out: &RotateParams{Start: 0, End: 0, Amount: 32},
},
{
src: RotateParams{Start: 0, End: 31, Amount: 32},
mask: 0x0000000080000000,
in: &RotateParams{Start: 0, End: 0, Amount: 32},
out: nil,
},
}
eq := func(x, y *RotateParams) bool {
if x == nil && y == nil {
return true
}
if x == nil || y == nil {
return false
}
return *x == *y
}
for _, test := range tests {
if r := test.src.InMerge(test.mask); !eq(r, test.in) {
t.Errorf("%v merged with %#x (input): want %v, got %v", test.src, test.mask, test.in, r)
}
if r := test.src.OutMerge(test.mask); !eq(r, test.out) {
t.Errorf("%v merged with %#x (output): want %v, got %v", test.src, test.mask, test.out, r)
}
}
}

test/codegen/bitfield.go

@@ -127,11 +127,13 @@ func sbfx6(x int32) int32 {
// ubfiz
func ubfiz1(x uint64) uint64 {
// arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
// s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
return (x & 0xfff) << 3
}
func ubfiz2(x uint64) uint64 {
// arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
// s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
return (x << 4) & 0xfff0
}
@@ -149,6 +151,7 @@ func ubfiz5(x uint8) uint64 {
func ubfiz6(x uint64) uint64 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
return (x << 4) >> 3
}
@@ -159,6 +162,7 @@ func ubfiz7(x uint32) uint32 {
func ubfiz8(x uint64) uint64 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]20",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]43, [$]62, [$]1, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 4) >> 3
}
@@ -169,17 +173,20 @@ func ubfiz9(x uint64) uint64 {
func ubfiz10(x uint64) uint64 {
// arm64:"UBFIZ\t[$]7, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]45, [$]56, [$]7, ",-"SLD",-"SRD",-"AND"
return ((x << 5) & (0xfff << 5)) << 2
}
// ubfx
func ubfx1(x uint64) uint64 {
// arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
return (x >> 25) & 1023
}
func ubfx2(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
return (x & 0x0ff0) >> 4
}
@@ -196,30 +203,37 @@ func ubfx5(x uint8) uint64 {
}
func ubfx6(x uint64) uint64 {
return (x << 1) >> 2 // arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
// arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
return (x << 1) >> 2
}
func ubfx7(x uint32) uint32 {
return (x << 1) >> 2 // arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR"
// arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR"
return (x << 1) >> 2
}
func ubfx8(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]52, [$]63, [$]63,",-"SLD",-"SRD",-"AND"
return ((x << 1) >> 2) & 0xfff
}
func ubfx9(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]11",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]53, [$]63, [$]60, ",-"SLD",-"SRD",-"AND"
return ((x >> 3) & 0xfff) >> 1
}
func ubfx10(x uint64) uint64 {
// arm64:"UBFX\t[$]5, R[0-9]+, [$]56",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]8, [$]63, [$]59, ",-"SLD",-"SRD"
return ((x >> 2) << 5) >> 8
}
func ubfx11(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]19",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]45, [$]63, [$]63, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 3) >> 4
}

test/codegen/bits.go

@@ -340,3 +340,15 @@ func bitSetTest(x int) bool {
// amd64:"CMPQ\tAX, [$]9"
return x&9 == 9
}
// mask contiguous one bits
func cont1Mask64U(x uint64) uint64 {
// s390x:"RISBGZ\t[$]16, [$]47, [$]0,"
return x&0x0000ffffffff0000
}
// mask contiguous zero bits
func cont0Mask64U(x uint64) uint64 {
// s390x:"RISBGZ\t[$]48, [$]15, [$]0,"
return x&0xffff00000000ffff
}

test/codegen/mathbits.go

@@ -213,7 +213,7 @@ func RotateLeft64(n uint64) uint64 {
// arm64:"ROR"
// ppc64:"ROTL"
// ppc64le:"ROTL"
// s390x:"RLLG"
// s390x:"RISBGZ\t[$]0, [$]63, [$]37, "
// wasm:"I64Rotl"
return bits.RotateLeft64(n, 37)
}

test/codegen/rotate.go

@@ -17,21 +17,21 @@ func rot64(x uint64) uint64 {
// amd64:"ROLQ\t[$]7"
// arm64:"ROR\t[$]57"
// s390x:"RLLG\t[$]7"
// s390x:"RISBGZ\t[$]0, [$]63, [$]7, "
// ppc64:"ROTL\t[$]7"
// ppc64le:"ROTL\t[$]7"
a += x<<7 | x>>57
// amd64:"ROLQ\t[$]8"
// arm64:"ROR\t[$]56"
// s390x:"RLLG\t[$]8"
// s390x:"RISBGZ\t[$]0, [$]63, [$]8, "
// ppc64:"ROTL\t[$]8"
// ppc64le:"ROTL\t[$]8"
a += x<<8 + x>>56
// amd64:"ROLQ\t[$]9"
// arm64:"ROR\t[$]55"
// s390x:"RLLG\t[$]9"
// s390x:"RISBGZ\t[$]0, [$]63, [$]9, "
// ppc64:"ROTL\t[$]9"
// ppc64le:"ROTL\t[$]9"
a += x<<9 ^ x>>55

test/codegen/shift.go

@@ -11,84 +11,84 @@ package codegen
// ------------------ //
func lshMask64x64(v int64, s uint64) int64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v << (s & 63)
}
func rshMask64Ux64(v uint64, s uint64) uint64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v >> (s & 63)
}
func rshMask64x64(v int64, s uint64) int64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v >> (s & 63)
}
func lshMask32x64(v int32, s uint64) int32 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ISEL",-"ORN"
// ppc64:"ISEL",-"ORN"
return v << (s & 63)
}
func rshMask32Ux64(v uint32, s uint64) uint32 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ISEL",-"ORN"
// ppc64:"ISEL",-"ORN"
return v >> (s & 63)
}
func rshMask32x64(v int32, s uint64) int32 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ISEL",-"ORN"
// ppc64:"ISEL",-"ORN"
return v >> (s & 63)
}
func lshMask64x32(v int64, s uint32) int64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN"
// ppc64:"ANDCC",-"ORN"
return v << (s & 63)
}
func rshMask64Ux32(v uint64, s uint32) uint64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN"
// ppc64:"ANDCC",-"ORN"
return v >> (s & 63)
}
func rshMask64x32(v int64, s uint32) int64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v >> (s & 63)
}
func lshMask64x32Ext(v int64, s int32) int64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v << uint(s&63)
}
func rshMask64Ux32Ext(v uint64, s int32) uint64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v >> uint(s&63)
}
func rshMask64x32Ext(v int64, s int32) int64 {
// s390x:-".*AND",-".*MOVDGE"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// ppc64le:"ANDCC",-"ORN",-"ISEL"
// ppc64:"ANDCC",-"ORN",-"ISEL"
return v >> uint(s&63)
@@ -128,7 +128,8 @@ func lshSignedMasked(v8 int8, v16 int16, v32 int32, v64 int64, x int) {
func rshGuarded64(v int64, s uint) int64 {
if s < 64 {
// s390x:-".*AND",-".*MOVDGE" wasm:-"Select",-".*LtU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// wasm:-"Select",-".*LtU"
return v >> s
}
panic("shift too large")
@@ -136,7 +137,8 @@ func rshGuarded64(v int64, s uint) int64 {
func rshGuarded64U(v uint64, s uint) uint64 {
if s < 64 {
// s390x:-".*AND",-".*MOVDGE" wasm:-"Select",-".*LtU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// wasm:-"Select",-".*LtU"
return v >> s
}
panic("shift too large")
@@ -144,7 +146,8 @@ func rshGuarded64U(v uint64, s uint) uint64 {
func lshGuarded64(v int64, s uint) int64 {
if s < 64 {
// s390x:-".*AND",-".*MOVDGE" wasm:-"Select",-".*LtU"
// s390x:-"RISBGZ",-"AND",-"LOCGR"
// wasm:-"Select",-".*LtU"
return v << s
}
panic("shift too large")