diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s
index c1385a13ab..17ecd9b2b8 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64.s
@@ -364,6 +364,9 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
 	MOVD	$1, ZR
 	MOVD	$1, R1
 	MOVK	$1, R1
+	MOVD	$0x1000100010001000, RSP // MOVD $1152939097061330944, RSP // ff8304b2
+	MOVW	$0x10001000, RSP         // MOVW $268439552, RSP           // ff830432
+	ADDW	$0x10001000, R1          // ADDW $268439552, R1            // fb83043221001b0b
 
 	// move a large constant to a Vd.
 	VMOVS	$0x80402010, V11      // VMOVS $2151686160, V11
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index f7c0a48214..3b0fa6fb53 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -404,8 +404,8 @@ var optab = []Optab{
 	/* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */
 	{AMOVW, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
 	{AMOVD, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
-	{AMOVW, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
-	{AMOVD, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0},
+	{AMOVW, C_BITCON, C_NONE, C_NONE, C_RSP, 32, 4, 0, 0, 0},
+	{AMOVD, C_BITCON, C_NONE, C_NONE, C_RSP, 32, 4, 0, 0, 0},
 	{AMOVW, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, NOTUSETMP, 0},
 	{AMOVD, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, NOTUSETMP, 0},
 	{AMOVD, C_MOVCON3, C_NONE, C_NONE, C_REG, 12, 12, 0, NOTUSETMP, 0},
@@ -2060,9 +2060,10 @@ func (c *ctxt7) oplook(p *obj.Prog) *Optab {
 		}
 		a1 = a0 + 1
 		p.From.Class = int8(a1)
-		// more specific classification of 32-bit integers
 		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE {
-			if p.As == AMOVW || isADDWop(p.As) {
+			if p.As == AMOVW || isADDWop(p.As) || isANDWop(p.As) {
+				// For 32-bit instruction with constant, we need to
+				// treat its offset value as 32 bits to classify it.
 				ra0 := c.con32class(&p.From)
 				// do not break C_ADDCON2 when S bit is set
 				if (p.As == AADDSW || p.As == ASUBSW) && ra0 == C_ADDCON2 {
@@ -2071,16 +2072,8 @@ func (c *ctxt7) oplook(p *obj.Prog) *Optab {
 				a1 = ra0 + 1
 				p.From.Class = int8(a1)
 			}
-			if isANDWop(p.As) && a0 != C_BITCON {
-				// For 32-bit logical instruction with constant,
-				// the BITCON test is special in that it looks at
-				// the 64-bit which has the high 32-bit as a copy
-				// of the low 32-bit. We have handled that and
-				// don't pass it to con32class.
-				a1 = c.con32class(&p.From) + 1
-				p.From.Class = int8(a1)
-			}
 			if ((p.As == AMOVD) || isANDop(p.As) || isADDop(p.As)) && (a0 == C_LCON || a0 == C_VCON) {
+				// more specific classification of 64-bit integers
 				a1 = c.con64class(&p.From) + 1
 				p.From.Class = int8(a1)
 			}
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index 8f7648e5d5..425cb88f7e 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -314,13 +314,13 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		}
 	}
 
-	// For 32-bit logical instruction with constant,
-	// rewrite the high 32-bit to be a repetition of
-	// the low 32-bit, so that the BITCON test can be
-	// shared for both 32-bit and 64-bit. 32-bit ops
-	// will zero the high 32-bit of the destination
-	// register anyway.
-	if isANDWop(p.As) && p.From.Type == obj.TYPE_CONST {
+	// For 32-bit instruction with constant, rewrite
+	// the high 32-bit to be a repetition of the low
+	// 32-bit, so that the BITCON test can be shared
+	// for both 32-bit and 64-bit. 32-bit ops will
+	// zero the high 32-bit of the destination register
+	// anyway.
+	if (isANDWop(p.As) || isADDWop(p.As) || p.As == AMOVW) && p.From.Type == obj.TYPE_CONST {
 		v := p.From.Offset & 0xffffffff
 		p.From.Offset = v | v<<32
 	}
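
For context, below is a minimal standalone sketch (not part of the patch) of the idea the rewritten progedit comment describes: for a 32-bit op with a constant operand, the low 32 bits are duplicated into the high 32 bits so that a single 64-bit BITCON (logical-immediate) test can classify the operand. The helper validLogicalImms is purely illustrative and does not exist in the Go toolchain; it brute-forces the set of encodable bitmask immediates rather than reproducing the assembler's real classifier.

package main

import "fmt"

// validLogicalImms enumerates every 64-bit value that fits the ARM64
// logical (bitmask) immediate form: a run of 1..size-1 ones, rotated
// within an element of 2, 4, 8, 16, 32 or 64 bits, then replicated
// across the full 64-bit word. Illustrative brute force only.
func validLogicalImms() map[uint64]bool {
	set := make(map[uint64]bool)
	for size := uint(2); size <= 64; size *= 2 {
		mask := uint64(1)<<size - 1 // all ones within one element
		for ones := uint(1); ones < size; ones++ {
			elem := uint64(1)<<ones - 1
			for rot := uint(0); rot < size; rot++ {
				// Rotate the run of ones right by rot within the element.
				r := (elem>>rot | elem<<(size-rot)) & mask
				// Replicate the element across 64 bits.
				v := r
				for w := size; w < 64; w *= 2 {
					v |= v << w
				}
				set[v] = true
			}
		}
	}
	return set
}

func main() {
	imms := validLogicalImms()

	// progedit's rewrite for a 32-bit op with a constant operand:
	// duplicate the low 32 bits into the high 32 bits so the shared
	// 64-bit bitmask-immediate test applies.
	v := uint64(0x10001000) // the MOVW/ADDW constant from the test case
	dup := v | v<<32

	fmt.Printf("%#x -> %#x, bitmask immediate: %v\n", v, dup, imms[dup])
	// Prints: 0x10001000 -> 0x1000100010001000, bitmask immediate: true,
	// which is why the MOVD/MOVW-to-RSP cases above can assemble as a
	// single ORR-immediate (the ff8304b2 / ff830432 test expectations).
}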