
cmd/compile/internal/ssa: refactor ARM64 address folding

These patterns are the only uses of isArg and isAuto, and they all
follow a common pattern too. Extract out so that we can more easily
tweak the interface for isArg/isAuto.

Passes toolstash -cmp for linux/arm64.

Change-Id: I9c509dabdc123c93cb1ad2f34fe8c12a9f313f6d
Reviewed-on: https://go-review.googlesource.com/40490
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Matthew Dempsky 2017-04-12 11:24:03 -07:00
parent 2e60882fc7
commit 4eb48a336e
3 changed files with 150 additions and 185 deletions
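
The refactor replaces each rule's hand-written offset check with a call to a new fitsARM64Offset helper. As an illustration drawn from the diff below, the MOVHload rule's condition

	is32Bit(off1+off2) && !isArg(sym)
	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))

becomes simply

	fitsARM64Offset(off1+off2, 2, sym)

with the data size (here 2 bytes) passed as the align argument. The same substitution, with align 1, 4, or 8, covers every load, store, and store-zero rule touched below.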

src/cmd/compile/internal/ssa/gen/ARM64.rules

@@ -525,152 +525,103 @@
 (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)
 
 // fold address into load/store
-// only small offset (between -256 and 256) or offset that is a multiple of data size
-// can be encoded in the instructions
-// since this rewriting takes place before stack allocation, the offset to SP is unknown,
-// so don't do it for args and locals with unaligned offset
-(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
 	(MOVBload [off1+off2] {sym} ptr mem)
-(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
 	(MOVBUload [off1+off2] {sym} ptr mem)
-(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
 	(MOVHload [off1+off2] {sym} ptr mem)
-(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
 	(MOVHUload [off1+off2] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
 	(MOVWload [off1+off2] {sym} ptr mem)
-(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
 	(MOVWUload [off1+off2] {sym} ptr mem)
-(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
 	(MOVDload [off1+off2] {sym} ptr mem)
-(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
 	(FMOVSload [off1+off2] {sym} ptr mem)
-(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
 	(FMOVDload [off1+off2] {sym} ptr mem)
-(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 1, sym) ->
 	(MOVBstore [off1+off2] {sym} ptr val mem)
-(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 2, sym) ->
 	(MOVHstore [off1+off2] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 4, sym) ->
 	(MOVWstore [off1+off2] {sym} ptr val mem)
-(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 8, sym) ->
 	(MOVDstore [off1+off2] {sym} ptr val mem)
-(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 4, sym) ->
 	(FMOVSstore [off1+off2] {sym} ptr val mem)
-(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 8, sym) ->
 	(FMOVDstore [off1+off2] {sym} ptr val mem)
-(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
 	(MOVBstorezero [off1+off2] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
 	(MOVHstorezero [off1+off2] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
 	(MOVWstorezero [off1+off2] {sym} ptr mem)
-(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2) && !isArg(sym)
-	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
 	(MOVDstorezero [off1+off2] {sym} ptr mem)
-(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) ->
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
 	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) ->
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
 	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
 	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
 	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
 	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
 	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
 	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
 	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
 	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) ->
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
 	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
 	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
 	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
 	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
 	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
 	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) ->
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
 	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
 	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
 	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+	&& canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
 	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 
 // store zero
@@ -1260,7 +1211,7 @@
 	y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
 	&& i1 == i0+1
-	&& (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s))
+	&& fitsARM64Offset(i0, 2, s)
 	&& x0.Uses == 1 && x1.Uses == 1
 	&& y0.Uses == 1 && y1.Uses == 1
 	&& mergePoint(b,x0,x1) != nil
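
(Note the old MOVDstorezero condition above tested (off1+off2)%2==8, which can never hold in Go, since a remainder mod 2 is always -1, 0, or 1; the old rule therefore only fired for small offsets. The new fitsARM64Offset(off1+off2, 8, sym) call applies the intended 8-byte alignment check.)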

src/cmd/compile/internal/ssa/rewrite.go

@@ -284,6 +284,20 @@ func isAuto(s interface{}) bool {
 	return ok
 }
 
+func fitsARM64Offset(off, align int64, sym interface{}) bool {
+	// only small offset (between -256 and 256) or offset that is a multiple of data size
+	// can be encoded in the instructions
+	// since this rewriting takes place before stack allocation, the offset to SP is unknown,
+	// so don't do it for args and locals with unaligned offset
+	if !is32Bit(off) {
+		return false
+	}
+	if align == 1 {
+		return true
+	}
+	return !isArg(sym) && (off%align == 0 || off < 256 && off > -256 && !isAuto(sym))
+}
+
 // isSameSym returns whether sym is the same as the given named symbol
 func isSameSym(sym interface{}, name string) bool {
 	s, ok := sym.(fmt.Stringer)
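
To make the new helper's behavior concrete, here is a minimal standalone sketch. The symKind type and its constants are stand-ins invented for this example (the real isArg/isAuto in rewrite.go classify the value's aux symbol by its concrete type), but the offset logic mirrors fitsARM64Offset as added above:

package main

import "fmt"

// symKind is a hypothetical stand-in for the compiler's symbol
// classification, so the logic can run outside the compiler.
type symKind int

const (
	symNone symKind = iota // global or no symbol
	symArg                 // function argument; SP offset unknown before stackalloc
	symAuto                // local variable; SP offset unknown before stackalloc
)

// is32Bit reports whether n fits in a signed 32-bit integer,
// matching the helper of the same name in rewrite.go.
func is32Bit(n int64) bool { return n == int64(int32(n)) }

// fitsARM64Offset applies the same checks as the helper added in this
// commit, with the interface{} sym parameter simplified to symKind.
func fitsARM64Offset(off, align int64, sym symKind) bool {
	if !is32Bit(off) {
		return false
	}
	if align == 1 {
		return true
	}
	return sym != symArg && (off%align == 0 || off < 256 && off > -256 && sym != symAuto)
}

func main() {
	// An aligned 8-byte offset folds even for an auto.
	fmt.Println(fitsARM64Offset(1024, 8, symAuto)) // true
	// An unaligned but small offset folds only when the SP offset is known.
	fmt.Println(fitsARM64Offset(17, 8, symNone)) // true
	fmt.Println(fitsARM64Offset(17, 8, symAuto)) // false
	// Args never fold when align > 1, aligned or not.
	fmt.Println(fitsARM64Offset(16, 8, symArg)) // false
	// Byte-sized accesses need no alignment, so any 32-bit offset folds.
	fmt.Println(fitsARM64Offset(300, 1, symArg)) // true
}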

src/cmd/compile/internal/ssa/rewriteARM64.go

@@ -2702,7 +2702,7 @@ func rewriteValueARM64_OpARM64Equal(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
 	// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 8, sym)
 	// result: (FMOVDload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2714,7 +2714,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 8, sym)) {
 			break
 		}
 		v.reset(OpARM64FMOVDload)
@@ -2725,7 +2725,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
 		return true
 	}
 	// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
 	// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2738,7 +2738,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64FMOVDload)
@@ -2752,7 +2752,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
 	// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 8, sym)
 	// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2765,7 +2765,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 8, sym)) {
 			break
 		}
 		v.reset(OpARM64FMOVDstore)
@@ -2777,7 +2777,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
 		return true
 	}
 	// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
 	// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2791,7 +2791,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64FMOVDstore)
@@ -2806,7 +2806,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
 	// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 4, sym)
 	// result: (FMOVSload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2818,7 +2818,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 4, sym)) {
 			break
 		}
 		v.reset(OpARM64FMOVSload)
@@ -2829,7 +2829,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
 		return true
 	}
 	// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
 	// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2842,7 +2842,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64FMOVSload)
@@ -2856,7 +2856,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
 	// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 4, sym)
 	// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2869,7 +2869,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 4, sym)) {
 			break
 		}
 		v.reset(OpARM64FMOVSstore)
@@ -2881,7 +2881,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
 		return true
 	}
 	// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
 	// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2895,7 +2895,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64FMOVSstore)
@@ -3560,7 +3560,7 @@ func rewriteValueARM64_OpARM64MODW(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
 	// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
+	// cond: fitsARM64Offset(off1+off2, 1, sym)
 	// result: (MOVBUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3572,7 +3572,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
+		if !(fitsARM64Offset(off1+off2, 1, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVBUload)
@@ -3583,7 +3583,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
 		return true
 	}
 	// match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
 	// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3596,7 +3596,7 @@ func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVBUload)
@@ -3671,7 +3671,7 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
 	// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
+	// cond: fitsARM64Offset(off1+off2, 1, sym)
 	// result: (MOVBload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3683,7 +3683,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
+		if !(fitsARM64Offset(off1+off2, 1, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVBload)
@@ -3694,7 +3694,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
 		return true
 	}
 	// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
 	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3707,7 +3707,7 @@ func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVBload)
@@ -3782,7 +3782,7 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
 	// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
+	// cond: fitsARM64Offset(off1+off2, 1, sym)
 	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -3795,7 +3795,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
+		if !(fitsARM64Offset(off1+off2, 1, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVBstore)
@@ -3807,7 +3807,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
 		return true
 	}
 	// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
 	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -3821,7 +3821,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVBstore)
@@ -3984,7 +3984,7 @@ func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
 	// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
+	// cond: fitsARM64Offset(off1+off2, 1, sym)
 	// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3996,7 +3996,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
+		if !(fitsARM64Offset(off1+off2, 1, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVBstorezero)
@@ -4007,7 +4007,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
 		return true
 	}
 	// match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
 	// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4020,7 +4020,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVBstorezero)
@@ -4034,7 +4034,7 @@ func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
 	// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 8, sym)
 	// result: (MOVDload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4046,7 +4046,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 8, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDload)
@@ -4057,7 +4057,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
 		return true
 	}
 	// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
 	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4070,7 +4070,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVDload)
@@ -4133,7 +4133,7 @@ func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
 	// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 8, sym)
 	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4146,7 +4146,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 8, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
@@ -4158,7 +4158,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
 		return true
 	}
 	// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
 	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4172,7 +4172,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
@@ -4209,7 +4209,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
 	// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 8, sym)
 	// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4221,7 +4221,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 8, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
@@ -4232,7 +4232,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
 		return true
 	}
 	// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
 	// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4245,7 +4245,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
@@ -4259,7 +4259,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
 	// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 2, sym)
 	// result: (MOVHUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4271,7 +4271,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 2, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVHUload)
@@ -4282,7 +4282,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
 		return true
 	}
 	// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
 	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4295,7 +4295,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVHUload)
@@ -4394,7 +4394,7 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
 	// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 2, sym)
 	// result: (MOVHload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4406,7 +4406,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 2, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVHload)
@@ -4417,7 +4417,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
 		return true
 	}
 	// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
 	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4430,7 +4430,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVHload)
@@ -4553,7 +4553,7 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
 	// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 2, sym)
 	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4566,7 +4566,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 2, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
@@ -4578,7 +4578,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
 		return true
 	}
 	// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
 	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4592,7 +4592,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
@@ -4713,7 +4713,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
 	// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 2, sym)
 	// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4725,7 +4725,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 2, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
@@ -4736,7 +4736,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
 		return true
 	}
 	// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
 	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4749,7 +4749,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
@@ -4763,7 +4763,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
 	// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 4, sym)
 	// result: (MOVWUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4775,7 +4775,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 4, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVWUload)
@@ -4786,7 +4786,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
 		return true
 	}
 	// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
 	// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4799,7 +4799,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVWUload)
@@ -4922,7 +4922,7 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
 	// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 4, sym)
 	// result: (MOVWload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4934,7 +4934,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 4, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVWload)
@@ -4945,7 +4945,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
 		return true
 	}
 	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
 	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4958,7 +4958,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVWload)
@@ -5129,7 +5129,7 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
 	// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 4, sym)
 	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -5142,7 +5142,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 4, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
@@ -5154,7 +5154,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
 		return true
 	}
 	// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
 	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -5168,7 +5168,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
@@ -5247,7 +5247,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
 }
 func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
 	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+	// cond: fitsARM64Offset(off1+off2, 4, sym)
 	// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -5259,7 +5259,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+		if !(fitsARM64Offset(off1+off2, 4, sym)) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
@@ -5270,7 +5270,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
 		return true
 	}
 	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+	// cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
 	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -5283,7 +5283,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+		if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
@@ -8112,7 +8112,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
 		return true
 	}
 	// match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
-	// cond: i1 == i0+1 && (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)
+	// cond: i1 == i0+1 && fitsARM64Offset(i0, 2, s) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)
 	// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
 	for {
 		t := v.Type
@@ -8149,7 +8149,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
 		if mem != x1.Args[1] {
 			break
 		}
-		if !(i1 == i0+1 && (i0%2 == 0 || i0 < 256 && i0 > -256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) {
+		if !(i1 == i0+1 && fitsARM64Offset(i0, 2, s) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) {
 			break
 		}
 		b = mergePoint(b, x0, x1)