
cmd/compile: do not fold offset into load/store for args on ARM64

Args may not be at an 8-byte aligned offset from SP. When the stack
frame is large, folding an arg's offset into a load or store can
produce a large unaligned offset that does not fit in a machine
instruction on ARM64. Therefore, disable offset folding for args.

This has a small performance impact (see below). A better fix would
be to let the assembler backend fix up the offset, loading it into a
register when it does not fit into an instruction; the compiler could
then simply generate large-offset loads and stores. Since in most
cases the offset is aligned or the stack frame is small, it would fit
in an instruction and no fixup would be needed. But that is too
complicated for Go 1.8.
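
As a concrete illustration (a minimal sketch mirroring the test added at
the end of this change, not part of the original message): an argument
that follows a 20-byte argument sits at an SP-relative offset of 4 mod 8,
and a roughly 8000-byte frame pushes that offset far beyond 255.

package p

// Sketch of the failing shape: b starts 20 bytes into the argument
// area, so its SP-relative offset is both large (the frame size is
// added in) and not a multiple of 8. The 8-byte loads used to copy b
// then fit neither the scaled 12-bit nor the unscaled 9-bit ARM64
// immediate form once the offset is folded into them.
func move(a, b [20]byte) [20]byte {
	var x [1000]int // force a large stack frame
	_ = x
	return b
}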

name                     old time/op    new time/op    delta
BinaryTree17-8              8.30s ± 0%     8.31s ± 0%    ~     (p=0.579 n=10+10)
Fannkuch11-8                6.14s ± 0%     6.18s ± 0%  +0.53%  (p=0.000 n=9+10)
FmtFprintfEmpty-8           117ns ± 0%     117ns ± 0%    ~     (all equal)
FmtFprintfString-8          196ns ± 0%     197ns ± 0%  +0.72%  (p=0.000 n=10+10)
FmtFprintfInt-8             204ns ± 0%     205ns ± 0%  +0.49%  (p=0.000 n=9+10)
FmtFprintfIntInt-8          302ns ± 0%     307ns ± 1%  +1.46%  (p=0.000 n=10+10)
FmtFprintfPrefixedInt-8     329ns ± 2%     326ns ± 0%    ~     (p=0.083 n=10+10)
FmtFprintfFloat-8           540ns ± 0%     542ns ± 0%  +0.46%  (p=0.000 n=8+7)
FmtManyArgs-8              1.20µs ± 1%    1.19µs ± 1%  -1.02%  (p=0.000 n=10+10)
GobDecode-8                17.3ms ± 1%    17.8ms ± 0%  +2.75%  (p=0.000 n=10+7)
GobEncode-8                15.3ms ± 1%    15.4ms ± 0%  +0.57%  (p=0.004 n=9+10)
Gzip-8                      789ms ± 0%     803ms ± 0%  +1.78%  (p=0.000 n=9+10)
Gunzip-8                    128ms ± 0%     130ms ± 0%  +1.73%  (p=0.000 n=10+9)
HTTPClientServer-8          202µs ± 6%     201µs ±10%    ~     (p=0.739 n=10+10)
JSONEncode-8               42.0ms ± 0%    42.1ms ± 0%  +0.19%  (p=0.028 n=10+9)
JSONDecode-8                159ms ± 0%     161ms ± 0%  +1.05%  (p=0.000 n=9+10)
Mandelbrot200-8            10.1ms ± 0%    10.1ms ± 0%  -0.07%  (p=0.000 n=10+9)
GoParse-8                  8.46ms ± 1%    8.61ms ± 1%  +1.77%  (p=0.000 n=10+10)
RegexpMatchEasy0_32-8       227ns ± 1%     226ns ± 0%  -0.35%  (p=0.001 n=10+9)
RegexpMatchEasy0_1K-8      1.63µs ± 0%    1.63µs ± 0%  -0.13%  (p=0.000 n=10+9)
RegexpMatchEasy1_32-8       250ns ± 0%     249ns ± 0%  -0.40%  (p=0.001 n=8+9)
RegexpMatchEasy1_1K-8      2.07µs ± 0%    2.08µs ± 0%  +0.05%  (p=0.027 n=9+9)
RegexpMatchMedium_32-8      350ns ± 0%     350ns ± 0%    ~     (p=0.412 n=9+8)
RegexpMatchMedium_1K-8      104µs ± 0%     104µs ± 0%  +0.31%  (p=0.000 n=10+7)
RegexpMatchHard_32-8       5.82µs ± 0%    5.82µs ± 0%    ~     (p=0.937 n=9+9)
RegexpMatchHard_1K-8        176µs ± 0%     176µs ± 0%  +0.03%  (p=0.000 n=9+8)
Revcomp-8                   1.36s ± 1%     1.37s ± 1%    ~     (p=0.218 n=10+10)
Template-8                  151ms ± 1%     156ms ± 1%  +3.21%  (p=0.000 n=10+10)
TimeParse-8                 737ns ± 0%     758ns ± 2%  +2.74%  (p=0.000 n=10+10)
TimeFormat-8                801ns ± 2%     789ns ± 1%  -1.51%  (p=0.000 n=10+10)
[Geo mean]                  142µs          143µs       +0.50%

Fixes #19137.

Change-Id: Ib8a21ea98c0ffb2d282a586535b213cc163e1b67
Reviewed-on: https://go-review.googlesource.com/37251
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Cherry Zhang 2017-02-18 21:03:15 -05:00
parent 174058038c
commit 6464e5dc4b
3 changed files with 133 additions and 120 deletions


@@ -530,69 +530,69 @@
 (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
 	(MOVBUload [off1+off2] {sym} ptr mem)
 (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHload [off1+off2] {sym} ptr mem)
 (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHUload [off1+off2] {sym} ptr mem)
 (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWload [off1+off2] {sym} ptr mem)
 (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWUload [off1+off2] {sym} ptr mem)
 (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVDload [off1+off2] {sym} ptr mem)
 (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVSload [off1+off2] {sym} ptr mem)
 (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVDload [off1+off2] {sym} ptr mem)
 (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
 	(MOVBstore [off1+off2] {sym} ptr val mem)
 (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHstore [off1+off2] {sym} ptr val mem)
 (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWstore [off1+off2] {sym} ptr val mem)
 (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVDstore [off1+off2] {sym} ptr val mem)
 (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVSstore [off1+off2] {sym} ptr val mem)
 (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(FMOVDstore [off1+off2] {sym} ptr val mem)
 (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
 	(MOVBstorezero [off1+off2] {sym} ptr mem)
 (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVHstorezero [off1+off2] {sym} ptr mem)
 (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVWstorezero [off1+off2] {sym} ptr mem)
 (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+	&& is32Bit(off1+off2) && !isArg(sym)
+	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
 	(MOVDstorezero [off1+off2] {sym} ptr mem)
 (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
@@ -602,71 +602,71 @@
 	&& is32Bit(off1+off2) ->
 	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
 	&& is32Bit(off1+off2) ->
 	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
 	&& is32Bit(off1+off2) ->
 	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
-	&& is32Bit(off1+off2)
-	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+	&& is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
 	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 // store zero
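
The pattern of the change is the same for every sized load and store
above: the !isArg test is hoisted out of the small-offset escape hatch
into an unconditional requirement, so offsets are never folded into
accesses of args, while autos are still only excluded from the under-256
case. The two halves of the remaining disjunction correspond to the two
ARM64 immediate addressing forms, a scaled unsigned 12-bit offset that
must be a multiple of the access size and an unscaled signed 9-bit offset
in (-256, 256); the hunks in the generated rewrite file below mirror
these rule conditions mechanically. Restated as plain Go (a sketch:
canFoldOffset is an illustrative name, not a compiler function, while
is32Bit, isArg and isAuto stand in for the helpers the rules reference):

package p

// canFoldOffset sketches the new side condition for an n-byte access
// (n = 2, 4 or 8); arg and auto say what kind of symbol the offset is
// relative to.
func canFoldOffset(off, n int64, arg, auto bool) bool {
	if !is32Bit(off) || arg {
		// An arg's final SP-relative offset is only known after frame
		// layout; it may end up both large and unaligned, so never fold.
		return false
	}
	// Scaled unsigned 12-bit form: offset must be a multiple of n.
	// Unscaled signed 9-bit form: offset must lie in (-256, 256); autos
	// are kept out of this case because their offsets still grow by the
	// frame size during layout.
	return off%n == 0 || (off > -256 && off < 256 && !auto)
}

// is32Bit mirrors the SSA helper of the same name.
func is32Bit(n int64) bool { return n == int64(int32(n)) }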


@ -2717,7 +2717,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVDload [off1+off2] {sym} ptr mem) // result: (FMOVDload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2729,7 +2729,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64FMOVDload) v.reset(OpARM64FMOVDload)
@ -2740,7 +2740,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
return true return true
} }
// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2753,7 +2753,7 @@ func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64FMOVDload) v.reset(OpARM64FMOVDload)
@ -2769,7 +2769,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVDstore [off1+off2] {sym} ptr val mem) // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2782,7 +2782,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64FMOVDstore) v.reset(OpARM64FMOVDstore)
@ -2794,7 +2794,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
return true return true
} }
// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2808,7 +2808,7 @@ func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64FMOVDstore) v.reset(OpARM64FMOVDstore)
@ -2825,7 +2825,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVSload [off1+off2] {sym} ptr mem) // result: (FMOVSload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2837,7 +2837,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64FMOVSload) v.reset(OpARM64FMOVSload)
@ -2848,7 +2848,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
return true return true
} }
// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2861,7 +2861,7 @@ func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64FMOVSload) v.reset(OpARM64FMOVSload)
@ -2877,7 +2877,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVSstore [off1+off2] {sym} ptr val mem) // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2890,7 +2890,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64FMOVSstore) v.reset(OpARM64FMOVSstore)
@ -2902,7 +2902,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
return true return true
} }
// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -2916,7 +2916,7 @@ func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64FMOVSstore) v.reset(OpARM64FMOVSstore)
@ -4089,7 +4089,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDload [off1+off2] {sym} ptr mem) // result: (MOVDload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4101,7 +4101,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVDload) v.reset(OpARM64MOVDload)
@ -4112,7 +4112,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4125,7 +4125,7 @@ func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVDload) v.reset(OpARM64MOVDload)
@ -4192,7 +4192,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDstore [off1+off2] {sym} ptr val mem) // result: (MOVDstore [off1+off2] {sym} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4205,7 +4205,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVDstore) v.reset(OpARM64MOVDstore)
@ -4217,7 +4217,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4231,7 +4231,7 @@ func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVDstore) v.reset(OpARM64MOVDstore)
@ -4270,7 +4270,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDstorezero [off1+off2] {sym} ptr mem) // result: (MOVDstorezero [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4282,7 +4282,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVDstorezero) v.reset(OpARM64MOVDstorezero)
@ -4293,7 +4293,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4306,7 +4306,7 @@ func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVDstorezero) v.reset(OpARM64MOVDstorezero)
@ -4322,7 +4322,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHUload [off1+off2] {sym} ptr mem) // result: (MOVHUload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4334,7 +4334,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVHUload) v.reset(OpARM64MOVHUload)
@ -4345,7 +4345,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4358,7 +4358,7 @@ func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVHUload) v.reset(OpARM64MOVHUload)
@ -4461,7 +4461,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHload [off1+off2] {sym} ptr mem) // result: (MOVHload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4473,7 +4473,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVHload) v.reset(OpARM64MOVHload)
@ -4484,7 +4484,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4497,7 +4497,7 @@ func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVHload) v.reset(OpARM64MOVHload)
@ -4624,7 +4624,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHstore [off1+off2] {sym} ptr val mem) // result: (MOVHstore [off1+off2] {sym} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4637,7 +4637,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVHstore) v.reset(OpARM64MOVHstore)
@ -4649,7 +4649,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4663,7 +4663,7 @@ func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVHstore) v.reset(OpARM64MOVHstore)
@ -4786,7 +4786,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHstorezero [off1+off2] {sym} ptr mem) // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4798,7 +4798,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVHstorezero) v.reset(OpARM64MOVHstorezero)
@ -4809,7 +4809,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4822,7 +4822,7 @@ func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVHstorezero) v.reset(OpARM64MOVHstorezero)
@ -4838,7 +4838,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWUload [off1+off2] {sym} ptr mem) // result: (MOVWUload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4850,7 +4850,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVWUload) v.reset(OpARM64MOVWUload)
@ -4861,7 +4861,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -4874,7 +4874,7 @@ func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVWUload) v.reset(OpARM64MOVWUload)
@ -5001,7 +5001,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWload [off1+off2] {sym} ptr mem) // result: (MOVWload [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -5013,7 +5013,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVWload) v.reset(OpARM64MOVWload)
@ -5024,7 +5024,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -5037,7 +5037,7 @@ func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVWload) v.reset(OpARM64MOVWload)
@ -5212,7 +5212,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWstore [off1+off2] {sym} ptr val mem) // result: (MOVWstore [off1+off2] {sym} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -5225,7 +5225,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVWstore) v.reset(OpARM64MOVWstore)
@ -5237,7 +5237,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -5251,7 +5251,7 @@ func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v.Args[1] val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVWstore) v.reset(OpARM64MOVWstore)
@ -5332,7 +5332,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWstorezero [off1+off2] {sym} ptr mem) // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -5344,7 +5344,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
off2 := v_0.AuxInt off2 := v_0.AuxInt
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) { if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break break
} }
v.reset(OpARM64MOVWstorezero) v.reset(OpARM64MOVWstorezero)
@ -5355,7 +5355,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
return true return true
} }
// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := v.AuxInt off1 := v.AuxInt
@ -5368,7 +5368,7 @@ func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
sym2 := v_0.Aux sym2 := v_0.Aux
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break break
} }
v.reset(OpARM64MOVWstorezero) v.reset(OpARM64MOVWstorezero)


@@ -20,3 +20,16 @@ func f(b [6]byte) T {
 	_ = x
 	return T{b: b}
 }
+
+// Arg symbol's base address may be not at an aligned offset to
+// SP. Folding arg's address into load/store may cause odd offset.
+func move(a, b [20]byte) [20]byte {
+	var x [1000]int // a large stack frame
+	_ = x
+	return b // b is not 8-byte aligned to SP
+}
+func zero() ([20]byte, [20]byte) {
+	var x [1000]int // a large stack frame
+	_ = x
+	return [20]byte{}, [20]byte{} // the second return value is not 8-byte aligned to SP
+}