1
0
mirror of https://github.com/golang/go synced 2024-11-26 03:07:57 -07:00

cmd/internal/obj/arm64: Add helpers for span7 passes

Adds helper functions for the literal pooling, large branch handling
and code emission stages of the span7 assembler pass. This hides the
implementation of the current assembler from the general workflow in
span7 to make the implementation easier to change in future.

Updates #44734

Change-Id: I8859956b23ad4faebeeff6df28051b098ef90fed
Reviewed-on: https://go-review.googlesource.com/c/go/+/595755
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
This commit is contained in:
Sebastian Nickolls 2024-06-26 17:10:20 +01:00 committed by Cherry Mui
parent 8cd550a232
commit 557211c150

View File

@ -33,6 +33,7 @@ package arm64
import ( import (
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/objabi" "cmd/internal/objabi"
"encoding/binary"
"fmt" "fmt"
"log" "log"
"math" "math"
@ -1099,40 +1100,15 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset & 0xffffffff), extrasize: int32(p.To.Offset >> 32)} c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset & 0xffffffff), extrasize: int32(p.To.Offset >> 32)}
p.To.Offset &= 0xffffffff // extrasize is no longer needed p.To.Offset &= 0xffffffff // extrasize is no longer needed
bflag := 1 // Process literal pool and allocate initial program counter for each Prog, before
// generating branch veneers.
pc := int64(0) pc := int64(0)
p.Pc = pc p.Pc = pc
var m int
var o *Optab
for p = p.Link; p != nil; p = p.Link { for p = p.Link; p != nil; p = p.Link {
p.Pc = pc p.Pc = pc
o = c.oplook(p) c.addLiteralsToPool(p)
m = o.size(c.ctxt, p) pc += int64(c.asmsizeBytes(p))
if m == 0 {
switch p.As {
case obj.APCALIGN, obj.APCALIGNMAX:
m = obj.AlignmentPadding(int32(pc), p, ctxt, cursym)
break
case obj.ANOP, obj.AFUNCDATA, obj.APCDATA:
continue
default:
c.ctxt.Diag("zero-width instruction\n%v", p)
} }
}
pc += int64(m)
if o.flag&LFROM != 0 {
c.addpool(p, &p.From)
}
if o.flag&LTO != 0 {
c.addpool(p, &p.To)
}
if c.blitrl != nil {
c.checkpool(p)
}
}
c.cursym.Size = pc
/* /*
* if any procedure is large enough to * if any procedure is large enough to
@ -1140,17 +1116,105 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
* generate extra passes putting branches * generate extra passes putting branches
* around jmps to fix. this is rare. * around jmps to fix. this is rare.
*/ */
for bflag != 0 { changed := true
bflag = 0 for changed {
changed = false
pc = 0 pc = 0
for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
p.Pc = pc p.Pc = pc
o = c.oplook(p) changed = changed || c.fixUpLongBranch(p)
pc += int64(c.asmsizeBytes(p))
}
}
/*
* lay out the code, emitting code and data relocations.
*/
buf := codeBuffer{&c.cursym.P}
for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
c.pc = p.Pc
switch p.As {
case obj.APCALIGN, obj.APCALIGNMAX:
v := obj.AlignmentPaddingLength(int32(p.Pc), p, c.ctxt)
for i := 0; i < int(v/4); i++ {
// emit ANOOP instruction by the padding size
buf.emit(OP_NOOP)
}
case obj.ANOP, obj.AFUNCDATA, obj.APCDATA:
continue
default:
var out [6]uint32
count := c.asmout(p, out[:])
buf.emit(out[:count]...)
}
}
buf.finish()
c.cursym.Size = int64(len(c.cursym.P))
// Mark nonpreemptible instruction sequences.
// We use REGTMP as a scratch register during call injection,
// so instruction sequences that use REGTMP are unsafe to
// preempt asynchronously.
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func().Text, c.newprog, c.isUnsafePoint, c.isRestartable)
// Now that we know byte offsets, we can generate jump table entries.
for _, jt := range cursym.Func().JumpTables {
for i, p := range jt.Targets {
// The ith jumptable entry points to the p.Pc'th
// byte in the function symbol s.
// TODO: try using relative PCs.
jt.Sym.WriteAddr(ctxt, int64(i)*8, 8, cursym, p.Pc)
}
}
}
// codeBuffer accumulates the encoded machine code for a function.
// It wraps the output byte slice so helpers can append to it in place.
type codeBuffer struct {
	data *[]byte
}

// pc reports the current program counter, i.e. the number of bytes
// emitted into the buffer so far.
func (cb *codeBuffer) pc() int64 {
	return int64(len(*cb.data))
}

// emit appends each opcode to the code buffer in little-endian byte order.
func (cb *codeBuffer) emit(op ...uint32) {
	for _, word := range op {
		var enc [4]byte
		binary.LittleEndian.PutUint32(enc[:], word)
		*cb.data = append(*cb.data, enc[:]...)
	}
}
// finish completes the code buffer for the function by padding it with
// zero bytes until its length is a multiple of funcAlign.
func (cb *codeBuffer) finish() {
	if rem := len(*cb.data) % funcAlign; rem != 0 {
		pad := make([]byte, funcAlign-rem)
		*cb.data = append(*cb.data, pad...)
	}
}
// asmsizeBytes returns the number of bytes the assembled Prog p will
// occupy in the final code image.
func (c *ctxt7) asmsizeBytes(p *obj.Prog) int {
	// Alignment directives consume whatever padding the current pc
	// requires; data/marker pseudo-ops emit no code at all.
	if p.As == obj.APCALIGN || p.As == obj.APCALIGNMAX {
		return obj.AlignmentPadding(int32(p.Pc), p, c.ctxt, c.cursym)
	}
	if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA {
		return 0
	}
	return c.oplook(p).size(c.ctxt, p)
}
// Modify the Prog list if the Prog is a branch with a large offset that cannot be
// encoded in the instruction. Return true if a modification was made, false if not.
func (c *ctxt7) fixUpLongBranch(p *obj.Prog) bool {
var toofar bool
o := c.oplook(p)
/* very large branches */ /* very large branches */
if (o.flag&BRANCH14BITS != 0 || o.flag&BRANCH19BITS != 0) && p.To.Target() != nil { if (o.flag&BRANCH14BITS != 0 || o.flag&BRANCH19BITS != 0) && p.To.Target() != nil {
otxt := p.To.Target().Pc - pc otxt := p.To.Target().Pc - p.Pc
var toofar bool
if o.flag&BRANCH14BITS != 0 { // branch instruction encodes 14 bits if o.flag&BRANCH14BITS != 0 { // branch instruction encodes 14 bits
toofar = otxt <= -(1<<15)+10 || otxt >= (1<<15)-10 toofar = otxt <= -(1<<15)+10 || otxt >= (1<<15)-10
} else if o.flag&BRANCH19BITS != 0 { // branch instruction encodes 19 bits } else if o.flag&BRANCH19BITS != 0 { // branch instruction encodes 19 bits
@ -1170,77 +1234,24 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.As = AB q.As = AB
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
q.To.SetTarget(q.Link.Link) q.To.SetTarget(q.Link.Link)
bflag = 1
}
}
m = o.size(c.ctxt, p)
if m == 0 {
switch p.As {
case obj.APCALIGN, obj.APCALIGNMAX:
m = obj.AlignmentPaddingLength(int32(pc), p, ctxt)
break
case obj.ANOP, obj.AFUNCDATA, obj.APCDATA:
continue
default:
c.ctxt.Diag("zero-width instruction\n%v", p)
} }
} }
pc += int64(m) return toofar
}
} }
pc += -pc & (funcAlign - 1) // Adds literal values from the Prog into the literal pool if necessary.
c.cursym.Size = pc func (c *ctxt7) addLiteralsToPool(p *obj.Prog) {
o := c.oplook(p)
/* if o.flag&LFROM != 0 {
* lay out the code, emitting code and data relocations. c.addpool(p, &p.From)
*/
c.cursym.Grow(c.cursym.Size)
bp := c.cursym.P
psz := int32(0)
var i int
var out [6]uint32
for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
c.pc = p.Pc
o = c.oplook(p)
sz := o.size(c.ctxt, p)
if sz > 4*len(out) {
log.Fatalf("out array in span7 is too small, need at least %d for %v", sz/4, p)
} }
if p.As == obj.APCALIGN || p.As == obj.APCALIGNMAX { if o.flag&LTO != 0 {
v := obj.AlignmentPaddingLength(int32(p.Pc), p, c.ctxt) c.addpool(p, &p.To)
for i = 0; i < int(v/4); i++ {
// emit ANOOP instruction by the padding size
c.ctxt.Arch.ByteOrder.PutUint32(bp, OP_NOOP)
bp = bp[4:]
psz += 4
}
} else {
c.asmout(p, o, out[:])
for i = 0; i < sz/4; i++ {
c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
bp = bp[4:]
psz += 4
}
}
}
// Mark nonpreemptible instruction sequences.
// We use REGTMP as a scratch register during call injection,
// so instruction sequences that use REGTMP are unsafe to
// preempt asynchronously.
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func().Text, c.newprog, c.isUnsafePoint, c.isRestartable)
// Now that we know byte offsets, we can generate jump table entries.
for _, jt := range cursym.Func().JumpTables {
for i, p := range jt.Targets {
// The ith jumptable entry points to the p.Pc'th
// byte in the function symbol s.
// TODO: try using relative PCs.
jt.Sym.WriteAddr(ctxt, int64(i)*8, 8, cursym, p.Pc)
} }
if c.blitrl != nil {
c.checkpool(p)
} }
} }
@ -3456,7 +3467,9 @@ func (c *ctxt7) checkShiftAmount(p *obj.Prog, a *obj.Addr) {
} }
} }
func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) {
o := c.oplook(p)
var os [5]uint32 var os [5]uint32
o1 := uint32(0) o1 := uint32(0)
o2 := uint32(0) o2 := uint32(0)
@ -5896,6 +5909,8 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
out[2] = o3 out[2] = o3
out[3] = o4 out[3] = o4
out[4] = o5 out[4] = o5
return int(o.size(c.ctxt, p) / 4)
} }
func (c *ctxt7) addrRelocType(p *obj.Prog) objabi.RelocType { func (c *ctxt7) addrRelocType(p *obj.Prog) objabi.RelocType {