diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 232ae241dd9..59a32d98948 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -727,7 +727,6 @@ type Link struct {
 	InlTree       InlTree // global inlining tree used by gc/inl.go
 	Imports       []string
 	Plan9privates *LSym
-	Printp        *Prog
 	Instoffset    int64
 	Autosize      int32
 	Pc            int64
diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go
index a7dafdffa08..3b550355e8c 100644
--- a/src/cmd/internal/obj/s390x/asmz.go
+++ b/src/cmd/internal/obj/s390x/asmz.go
@@ -36,6 +36,18 @@ import (
 	"sort"
 )
 
+// ctxtz holds the state needed while assembling a single function.
+// Each function gets a fresh ctxtz, so multiple functions can be
+// assembled concurrently and safely.
+type ctxtz struct {
+	ctxt       *obj.Link
+	newprog    obj.ProgAlloc
+	cursym     *obj.LSym
+	autosize   int32
+	instoffset int64
+	pc         int64
+}
+
 // instruction layout.
 const (
 	funcAlign = 16
@@ -390,50 +402,50 @@ func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	p := cursym.Text
 	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
 		return
 	}
-	ctxt.Cursym = cursym
-	ctxt.Autosize = int32(p.To.Offset)
 
 	if oprange[AORW&obj.AMask] == nil {
 		ctxt.Diag("s390x ops not initialized, call s390x.buildop first")
 	}
 
+	c := ctxtz{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
+
 	buffer := make([]byte, 0)
 	changed := true
 	loop := 0
 	for changed {
 		if loop > 10 {
-			ctxt.Diag("stuck in spanz loop")
+			c.ctxt.Diag("stuck in spanz loop")
 			break
 		}
 		changed = false
 		buffer = buffer[:0]
-		ctxt.Cursym.R = make([]obj.Reloc, 0)
-		for p := cursym.Text; p != nil; p = p.Link {
+		c.cursym.R = make([]obj.Reloc, 0)
+		for p := c.cursym.Text; p != nil; p = p.Link {
 			pc := int64(len(buffer))
 			if pc != p.Pc {
 				changed = true
 			}
 			p.Pc = pc
-			ctxt.Pc = p.Pc
-			asmout(ctxt, p, &buffer)
+			c.pc = p.Pc
+			c.asmout(p, &buffer)
 			if pc == int64(len(buffer)) {
 				switch p.As {
 				case obj.ANOP, obj.AFUNCDATA, obj.APCDATA, obj.ATEXT:
 					// ok
 				default:
-					ctxt.Diag("zero-width instruction\n%v", p)
+					c.ctxt.Diag("zero-width instruction\n%v", p)
 				}
 			}
 		}
 		loop++
 	}
 
-	cursym.Size = int64(len(buffer))
-	if cursym.Size%funcAlign != 0 {
-		cursym.Size += funcAlign - (cursym.Size % funcAlign)
+	c.cursym.Size = int64(len(buffer))
+	if c.cursym.Size%funcAlign != 0 {
+		c.cursym.Size += funcAlign - (c.cursym.Size % funcAlign)
 	}
-	cursym.Grow(cursym.Size)
-	copy(cursym.P, buffer)
+	c.cursym.Grow(c.cursym.Size)
+	copy(c.cursym.P, buffer)
 }
 
 func isint32(v int64) bool {
@@ -444,7 +456,7 @@ func isuint32(v uint64) bool {
 	return uint64(uint32(v)) == v
 }
 
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
+func (c *ctxtz) aclass(a *obj.Addr) int {
 	switch a.Type {
 	case obj.TYPE_NONE:
 		return C_NONE
@@ -472,9 +484,9 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 			// must have a symbol
 			break
 		}
-		ctxt.Instoffset = a.Offset
+		c.instoffset = a.Offset
 		if a.Sym.Type == obj.STLSBSS {
-			if ctxt.Flag_shared {
+			if c.ctxt.Flag_shared {
 				return C_TLS_IE // initial exec model
 			}
 			return C_TLS_LE // local exec model
@@ -485,25 +497,25 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
 		return C_GOTADDR
 
 	case obj.NAME_AUTO:
-		ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-		if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+		c.instoffset = int64(c.autosize) + a.Offset
+		if c.instoffset >= -BIG && c.instoffset < BIG {
			return C_SAUTO
 		}
 		return C_LAUTO
 
 	case obj.NAME_PARAM:
-		ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-		if ctxt.Instoffset >= -BIG &&
ctxt.Instoffset < BIG { + c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() + if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_NONE: - ctxt.Instoffset = a.Offset - if ctxt.Instoffset == 0 { + c.instoffset = a.Offset + if c.instoffset == 0 { return C_ZOREG } - if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG { + if c.instoffset >= -BIG && c.instoffset < BIG { return C_SOREG } return C_LOREG @@ -518,18 +530,18 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { if f64, ok := a.Val.(float64); ok && math.Float64bits(f64) == 0 { return C_ZCON } - ctxt.Diag("cannot handle the floating point constant %v", a.Val) + c.ctxt.Diag("cannot handle the floating point constant %v", a.Val) case obj.TYPE_CONST, obj.TYPE_ADDR: switch a.Name { case obj.NAME_NONE: - ctxt.Instoffset = a.Offset + c.instoffset = a.Offset if a.Reg != 0 { - if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG { + if -BIG <= c.instoffset && c.instoffset <= BIG { return C_SACON } - if isint32(ctxt.Instoffset) { + if isint32(c.instoffset) { return C_LACON } return C_DACON @@ -542,7 +554,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { if s == nil { break } - ctxt.Instoffset = a.Offset + c.instoffset = a.Offset if s.Type == obj.SCONST { goto consize } @@ -550,15 +562,15 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { return C_SYMADDR case obj.NAME_AUTO: - ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset - if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG { + c.instoffset = int64(c.autosize) + a.Offset + if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON case obj.NAME_PARAM: - ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize() - if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG { + c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() + if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON @@ -567,32 +579,32 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { return C_GOK consize: - if ctxt.Instoffset == 0 { + if c.instoffset == 0 { return C_ZCON } - if ctxt.Instoffset >= 0 { - if ctxt.Instoffset <= 0x7fff { + if c.instoffset >= 0 { + if c.instoffset <= 0x7fff { return C_SCON } - if ctxt.Instoffset <= 0xffff { + if c.instoffset <= 0xffff { return C_ANDCON } - if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */ + if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */ return C_UCON } - if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) { + if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) { return C_LCON } return C_DCON } - if ctxt.Instoffset >= -0x8000 { + if c.instoffset >= -0x8000 { return C_ADDCON } - if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) { + if c.instoffset&0xffff == 0 && isint32(c.instoffset) { return C_UCON } - if isint32(ctxt.Instoffset) { + if isint32(c.instoffset) { return C_LCON } return C_DCON @@ -604,14 +616,14 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { return C_GOK } -func oplook(ctxt *obj.Link, p *obj.Prog) *Optab { +func (c *ctxtz) oplook(p *obj.Prog) *Optab { a1 := int(p.Optab) if a1 != 0 { return &optab[a1-1] } a1 = int(p.From.Class) if a1 == 0 { - a1 = aclass(ctxt, &p.From) + 1 + a1 = c.aclass(&p.From) + 1 p.From.Class = int8(a1) } @@ -620,7 +632,7 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab { if p.From3 != nil { a3 = int(p.From3.Class) if a3 == 0 { - a3 = aclass(ctxt, p.From3) + 1 + a3 = 
c.aclass(p.From3) + 1 p.From3.Class = int8(a3) } } @@ -628,7 +640,7 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab { a3-- a4 := int(p.To.Class) if a4 == 0 { - a4 = aclass(ctxt, &p.To) + 1 + a4 = c.aclass(&p.To) + 1 p.To.Class = int8(a4) } @@ -660,8 +672,8 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab { } // cannot find a case; abort - ctxt.Diag("illegal combination %v %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4)) - ctxt.Diag("prog: %v\n", p) + c.ctxt.Diag("illegal combination %v %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4)) + c.ctxt.Diag("prog: %v\n", p) return nil } @@ -2484,13 +2496,13 @@ func oclass(a *obj.Addr) int { // Add a relocation for the immediate in a RIL style instruction. // The addend will be adjusted as required. -func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc { +func (c *ctxtz) addrilreloc(sym *obj.LSym, add int64) *obj.Reloc { if sym == nil { - ctxt.Diag("require symbol to apply relocation") + c.ctxt.Diag("require symbol to apply relocation") } offset := int64(2) // relocation offset from start of instruction - rel := obj.Addrel(ctxt.Cursym) - rel.Off = int32(ctxt.Pc + offset) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc + offset) rel.Siz = 4 rel.Sym = sym rel.Add = add + offset + int64(rel.Siz) @@ -2498,13 +2510,13 @@ func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc { return rel } -func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Reloc { +func (c *ctxtz) addrilrelocoffset(sym *obj.LSym, add, offset int64) *obj.Reloc { if sym == nil { - ctxt.Diag("require symbol to apply relocation") + c.ctxt.Diag("require symbol to apply relocation") } offset += int64(2) // relocation offset from start of instruction - rel := obj.Addrel(ctxt.Cursym) - rel.Off = int32(ctxt.Pc + offset) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc + offset) rel.Siz = 4 rel.Sym = sym rel.Add = add + offset + int64(rel.Siz) @@ -2514,13 +2526,13 @@ func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Re // Add a CALL relocation for the immediate in a RIL style instruction. // The addend will be adjusted as required. 
-func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc { +func (c *ctxtz) addcallreloc(sym *obj.LSym, add int64) *obj.Reloc { if sym == nil { - ctxt.Diag("require symbol to apply relocation") + c.ctxt.Diag("require symbol to apply relocation") } offset := int64(2) // relocation offset from start of instruction - rel := obj.Addrel(ctxt.Cursym) - rel.Off = int32(ctxt.Pc + offset) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc + offset) rel.Siz = 4 rel.Sym = sym rel.Add = add + offset + int64(rel.Siz) @@ -2528,7 +2540,7 @@ func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc { return rel } -func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 { +func (c *ctxtz) branchMask(p *obj.Prog) uint32 { switch p.As { case ABEQ, ACMPBEQ, ACMPUBEQ, AMOVDEQ: return 0x8 @@ -2551,17 +2563,16 @@ func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 { case ABVS: return 0x1 // unordered } - ctxt.Diag("unknown conditional branch %v", p.As) + c.ctxt.Diag("unknown conditional branch %v", p.As) return 0xF } -func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { - o := oplook(ctxt, p) - ctxt.Printp = p +func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { + o := c.oplook(p) switch o.type_ { default: - ctxt.Diag("unknown type %d", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) case 0: // PSEUDO OPS break @@ -2569,7 +2580,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 1: // mov reg reg switch p.As { default: - ctxt.Diag("unhandled operation: %v", p.As) + c.ctxt.Diag("unhandled operation: %v", p.As) case AMOVD: zRRE(op_LGR, uint32(p.To.Reg), uint32(p.From.Reg), asm) // sign extend @@ -2606,7 +2617,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { switch p.As { default: - ctxt.Diag("invalid opcode") + c.ctxt.Diag("invalid opcode") case AADD: opcode = op_AGRK case AADDC: @@ -2668,7 +2679,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 3: // mov $constant reg - v := vregoff(ctxt, &p.From) + v := c.vregoff(&p.From) switch p.As { case AMOVBZ: v = int64(uint8(v)) @@ -2760,7 +2771,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 7: // shift/rotate reg [reg] reg - d2 := vregoff(ctxt, &p.From) + d2 := c.vregoff(&p.From) b2 := p.From.Reg r3 := p.Reg if r3 == 0 { @@ -2791,7 +2802,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 8: // find leftmost one if p.To.Reg&1 != 0 { - ctxt.Diag("target must be an even-numbered register") + c.ctxt.Diag("target must be an even-numbered register") } // FLOGR also writes a mask to p.To.Reg+1. 
zRRE(op_FLOGR, uint32(p.To.Reg), uint32(p.From.Reg), asm) @@ -2851,13 +2862,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zRIL(_c, op_BRCL, 0xF, uint32(v), asm) } if p.To.Sym != nil { - addcallreloc(ctxt, p.To.Sym, p.To.Offset) + c.addcallreloc(p.To.Sym, p.To.Offset) } } case 12: r1 := p.To.Reg - d2 := vregoff(ctxt, &p.From) + d2 := c.vregoff(&p.From) b2 := p.From.Reg if b2 == 0 { b2 = o.param @@ -2929,18 +2940,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { if p.Pcond != nil { v = int32((p.Pcond.Pc - p.Pc) >> 1) } - mask := branchMask(ctxt, p) + mask := c.branchMask(p) if p.To.Sym == nil && int32(int16(v)) == v { zRI(op_BRC, mask, uint32(v), asm) } else { zRIL(_c, op_BRCL, mask, uint32(v), asm) } if p.To.Sym != nil { - addrilreloc(ctxt, p.To.Sym, p.To.Offset) + c.addrilreloc(p.To.Sym, p.To.Offset) } case 17: // move on condition - m3 := branchMask(ctxt, p) + m3 := c.branchMask(p) zRRF(op_LOCGR, m3, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm) case 18: // br/bl reg @@ -2951,16 +2962,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 19: // mov $sym+n(SB) reg - d := vregoff(ctxt, &p.From) + d := c.vregoff(&p.From) zRIL(_b, op_LARL, uint32(p.To.Reg), 0, asm) if d&1 != 0 { zRX(op_LA, uint32(p.To.Reg), uint32(p.To.Reg), 0, 1, asm) d -= 1 } - addrilreloc(ctxt, p.From.Sym, d) + c.addrilreloc(p.From.Sym, d) case 21: // subtract $constant [reg] reg - v := vregoff(ctxt, &p.From) + v := c.vregoff(&p.From) r := p.Reg if r == 0 { r = p.To.Reg @@ -2982,7 +2993,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 22: // add/multiply $constant [reg] reg - v := vregoff(ctxt, &p.From) + v := c.vregoff(&p.From) r := p.Reg if r == 0 { r = p.To.Reg @@ -3028,10 +3039,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 23: // 64-bit logical op $constant reg // TODO(mundaym): merge with case 24. 
- v := vregoff(ctxt, &p.From) + v := c.vregoff(&p.From) switch p.As { default: - ctxt.Diag("%v is not supported", p) + c.ctxt.Diag("%v is not supported", p) case AAND: if v >= 0 { // needs zero extend zRIL(_a, op_LGFI, REGTMP, uint32(v), asm) @@ -3060,7 +3071,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 24: // 32-bit logical op $constant reg - v := vregoff(ctxt, &p.From) + v := c.vregoff(&p.From) switch p.As { case AANDW: if uint32(v&0xffff0000) == 0xffff0000 { @@ -3083,7 +3094,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 26: // MOVD $offset(base)(index), reg - v := regoff(ctxt, &p.From) + v := c.regoff(&p.From) r := p.From.Reg if r == 0 { r = o.param @@ -3099,7 +3110,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 31: // dword - wd := uint64(vregoff(ctxt, &p.From)) + wd := uint64(c.vregoff(&p.From)) *asm = append(*asm, uint8(wd>>56), uint8(wd>>48), @@ -3114,7 +3125,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { var opcode uint32 switch p.As { default: - ctxt.Diag("invalid opcode") + c.ctxt.Diag("invalid opcode") case AFADD: opcode = op_ADBR case AFADDS: @@ -3165,7 +3176,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { var opcode uint32 switch p.As { default: - ctxt.Diag("invalid opcode") + c.ctxt.Diag("invalid opcode") case AFMADD: opcode = op_MADBR case AFMADDS: @@ -3178,7 +3189,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zRRD(opcode, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), asm) case 35: // mov reg mem (no relocation) - d2 := regoff(ctxt, &p.To) + d2 := c.regoff(&p.To) b2 := p.To.Reg if b2 == 0 { b2 = o.param @@ -3192,10 +3203,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { x2 = REGTMP d2 = 0 } - zRXY(zopstore(ctxt, p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm) + zRXY(c.zopstore(p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm) case 36: // mov mem reg (no relocation) - d2 := regoff(ctxt, &p.From) + d2 := c.regoff(&p.From) b2 := p.From.Reg if b2 == 0 { b2 = o.param @@ -3209,10 +3220,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { x2 = REGTMP d2 = 0 } - zRXY(zopload(ctxt, p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm) + zRXY(c.zopload(p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm) case 40: // word/byte - wd := uint32(regoff(ctxt, &p.From)) + wd := uint32(c.regoff(&p.From)) if p.As == AWORD { //WORD *asm = append(*asm, uint8(wd>>24), uint8(wd>>16), uint8(wd>>8), uint8(wd)) } else { //BYTE @@ -3232,9 +3243,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 48: // floating-point round to integer - m3 := vregoff(ctxt, &p.From) + m3 := c.vregoff(&p.From) if 0 > m3 || m3 > 7 { - ctxt.Diag("mask (%v) must be in the range [0, 7]", m3) + c.ctxt.Diag("mask (%v) must be in the range [0, 7]", m3) } var opcode uint32 switch p.As { @@ -3263,21 +3274,21 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 70: // cmp reg reg if p.As == ACMPW || p.As == ACMPWU { - zRR(zoprr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm) + zRR(c.zoprr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm) } else { - zRRE(zoprre(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm) + zRRE(c.zoprre(p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm) } case 71: // cmp reg $constant - v := vregoff(ctxt, &p.To) + v := c.vregoff(&p.To) switch p.As { case ACMP, ACMPW: if int64(int32(v)) != v { - ctxt.Diag("%v overflows an int32", v) + c.ctxt.Diag("%v overflows an int32", v) } 
case ACMPU, ACMPWU: if int64(uint32(v)) != v { - ctxt.Diag("%v overflows a uint32", v) + c.ctxt.Diag("%v overflows a uint32", v) } } if p.As == ACMP && int64(int16(v)) == v { @@ -3285,12 +3296,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } else if p.As == ACMPW && int64(int16(v)) == v { zRI(op_CHI, uint32(p.From.Reg), uint32(v), asm) } else { - zRIL(_a, zopril(ctxt, p.As), uint32(p.From.Reg), uint32(v), asm) + zRIL(_a, c.zopril(p.As), uint32(p.From.Reg), uint32(v), asm) } case 72: // mov $constant mem - v := regoff(ctxt, &p.From) - d := regoff(ctxt, &p.To) + v := c.regoff(&p.From) + d := c.regoff(&p.To) r := p.To.Reg x := p.To.Index if r == 0 { @@ -3337,19 +3348,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } d = 0 } - zRXY(zopstore(ctxt, p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm) + zRXY(c.zopstore(p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm) } case 73: // mov $constant addr (including relocation) - v := regoff(ctxt, &p.From) - d := regoff(ctxt, &p.To) + v := c.regoff(&p.From) + d := c.regoff(&p.To) a := uint32(0) if d&1 != 0 { d -= 1 a = 1 } zRIL(_b, op_LARL, REGTMP, uint32(d), asm) - addrilreloc(ctxt, p.To.Sym, int64(d)) + c.addrilreloc(p.To.Sym, int64(d)) if int32(int16(v)) == v { var opcode uint32 switch p.As { @@ -3369,11 +3380,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } } else { zRIL(_a, op_LGFI, REGTMP2, uint32(v), asm) - zRXY(zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm) + zRXY(c.zopstore(p.As), REGTMP2, 0, REGTMP, a, asm) } case 74: // mov reg addr (including relocation) - i2 := regoff(ctxt, &p.To) + i2 := c.regoff(&p.To) switch p.As { case AMOVD: zRIL(_b, op_STGRL, uint32(p.From.Reg), 0, asm) @@ -3396,10 +3407,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zRIL(_b, op_LARL, REGTMP, 0, asm) zRX(op_STE, uint32(p.From.Reg), 0, REGTMP, 0, asm) } - addrilreloc(ctxt, p.To.Sym, int64(i2)) + c.addrilreloc(p.To.Sym, int64(i2)) case 75: // mov addr reg (including relocation) - i2 := regoff(ctxt, &p.From) + i2 := c.regoff(&p.From) switch p.As { case AMOVD: if i2&1 != 0 { @@ -3437,11 +3448,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zRIL(_a, op_LARL, REGTMP, 0, asm) zRX(op_LE, uint32(p.To.Reg), 0, REGTMP, 0, asm) } - addrilreloc(ctxt, p.From.Sym, int64(i2)) + c.addrilreloc(p.From.Sym, int64(i2)) case 77: // syscall $constant if p.From.Offset > 255 || p.From.Offset < 1 { - ctxt.Diag("illegal system call; system call number out of range: %v", p) + c.ctxt.Diag("illegal system call; system call number out of range: %v", p) zE(op_TRAP2, asm) // trap always } else { zI(op_SVC, uint32(p.From.Offset), asm) @@ -3453,7 +3464,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { *asm = append(*asm, 0, 0, 0, 0) case 79: // compare and swap reg reg reg - v := regoff(ctxt, &p.To) + v := c.regoff(&p.To) if v < 0 { v = 0 } @@ -3521,12 +3532,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zRRF(opcode, 5, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm) case 84: // storage-and-storage operations $length mem mem (length in From3) - l := regoff(ctxt, p.From3) + l := c.regoff(p.From3) if l < 1 || l > 256 { - ctxt.Diag("number of bytes (%v) not in range [1,256]", l) + c.ctxt.Diag("number of bytes (%v) not in range [1,256]", l) } if p.From.Index != 0 || p.To.Index != 0 { - ctxt.Diag("cannot use index reg") + c.ctxt.Diag("cannot use index reg") } b1 := p.To.Reg b2 := p.From.Reg @@ -3536,11 +3547,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { if b2 == 0 { b2 = o.param } - d1 
:= regoff(ctxt, &p.To) - d2 := regoff(ctxt, &p.From) + d1 := c.regoff(&p.To) + d2 := c.regoff(&p.From) if d1 < 0 || d1 >= DISP12 { if b2 == REGTMP { - ctxt.Diag("REGTMP conflict") + c.ctxt.Diag("REGTMP conflict") } if b1 != REGTMP { zRRE(op_LGR, REGTMP, uint32(b1), asm) @@ -3555,7 +3566,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } if d2 < 0 || d2 >= DISP12 { if b1 == REGTMP2 { - ctxt.Diag("REGTMP2 conflict") + c.ctxt.Diag("REGTMP2 conflict") } if b2 != REGTMP2 { zRRE(op_LGR, REGTMP2, uint32(b2), asm) @@ -3567,7 +3578,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { var opcode uint32 switch p.As { default: - ctxt.Diag("unexpected opcode %v", p.As) + c.ctxt.Diag("unexpected opcode %v", p.As) case AMVC: opcode = op_MVC case ACLC: @@ -3585,19 +3596,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zSS(_a, opcode, uint32(l-1), 0, uint32(b1), uint32(d1), uint32(b2), uint32(d2), asm) case 85: // load address relative long - v := regoff(ctxt, &p.From) + v := c.regoff(&p.From) if p.From.Sym == nil { if (v & 1) != 0 { - ctxt.Diag("cannot use LARL with odd offset: %v", v) + c.ctxt.Diag("cannot use LARL with odd offset: %v", v) } } else { - addrilreloc(ctxt, p.From.Sym, int64(v)) + c.addrilreloc(p.From.Sym, int64(v)) v = 0 } zRIL(_b, op_LARL, uint32(p.To.Reg), uint32(v>>1), asm) case 86: // load address - d := vregoff(ctxt, &p.From) + d := c.vregoff(&p.From) x := p.From.Index b := p.From.Reg if b == 0 { @@ -3611,13 +3622,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { } case 87: // execute relative long - v := vregoff(ctxt, &p.From) + v := c.vregoff(&p.From) if p.From.Sym == nil { if v&1 != 0 { - ctxt.Diag("cannot use EXRL with odd offset: %v", v) + c.ctxt.Diag("cannot use EXRL with odd offset: %v", v) } } else { - addrilreloc(ctxt, p.From.Sym, v) + c.addrilreloc(p.From.Sym, v) v = 0 } zRIL(_b, op_EXRL, uint32(p.To.Reg), uint32(v>>1), asm) @@ -3634,7 +3645,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case ASTCKF: opcode = op_STCKF } - v := vregoff(ctxt, &p.To) + v := c.vregoff(&p.To) r := int(p.To.Reg) if r == 0 { r = int(o.param) @@ -3655,7 +3666,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { opcode = op_CLGRJ opcode2 = op_CLGR } - mask := branchMask(ctxt, p) + mask := c.branchMask(p) if int32(int16(v)) != v { zRRE(opcode2, uint32(p.From.Reg), uint32(p.Reg), asm) zRIL(_c, op_BRCL, mask, uint32(v-sizeRRE/2), asm) @@ -3677,22 +3688,22 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { opcode = op_CLGIJ opcode2 = op_CLGFI } - mask := branchMask(ctxt, p) + mask := c.branchMask(p) if int32(int16(v)) != v { - zRIL(_a, opcode2, uint32(p.From.Reg), uint32(regoff(ctxt, p.From3)), asm) + zRIL(_a, opcode2, uint32(p.From.Reg), uint32(c.regoff(p.From3)), asm) zRIL(_c, op_BRCL, mask, uint32(v-sizeRIL/2), asm) } else { - zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(regoff(ctxt, p.From3)), asm) + zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(c.regoff(p.From3)), asm) } case 93: // GOT lookup - v := vregoff(ctxt, &p.To) + v := c.vregoff(&p.To) if v != 0 { - ctxt.Diag("invalid offset against GOT slot %v", p) + c.ctxt.Diag("invalid offset against GOT slot %v", p) } zRIL(_b, op_LGRL, uint32(p.To.Reg), 0, asm) - rel := obj.Addrel(ctxt.Cursym) - rel.Off = int32(ctxt.Pc + 2) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc + 2) rel.Siz = 4 rel.Sym = p.From.Sym rel.Type = obj.R_GOTPCREL @@ -3703,8 +3714,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { zRXY(op_LG, 
uint32(p.To.Reg), REGTMP, 0, 0, asm) zRI(op_BRC, 0xF, (sizeRI+8)>>1, asm) *asm = append(*asm, 0, 0, 0, 0, 0, 0, 0, 0) - rel := obj.Addrel(ctxt.Cursym) - rel.Off = int32(ctxt.Pc + sizeRIL + sizeRXY + sizeRI) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc + sizeRIL + sizeRXY + sizeRI) rel.Siz = 8 rel.Sym = p.From.Sym rel.Type = obj.R_TLS_LE @@ -3723,8 +3734,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { // R_390_TLS_IEENT zRIL(_b, op_LARL, REGTMP, 0, asm) - ieent := obj.Addrel(ctxt.Cursym) - ieent.Off = int32(ctxt.Pc + 2) + ieent := obj.Addrel(c.cursym) + ieent.Off = int32(c.pc + 2) ieent.Siz = 4 ieent.Sym = p.From.Sym ieent.Type = obj.R_TLS_IE @@ -3736,14 +3747,14 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { // not strictly required but might allow the linker to optimize case 96: // clear macro - length := vregoff(ctxt, &p.From) - offset := vregoff(ctxt, &p.To) + length := c.vregoff(&p.From) + offset := c.vregoff(&p.To) reg := p.To.Reg if reg == 0 { reg = o.param } if length <= 0 { - ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length) + c.ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length) } for length > 0 { if offset < 0 || offset >= DISP12 { @@ -3783,7 +3794,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 97: // store multiple rstart := p.From.Reg rend := p.Reg - offset := regoff(ctxt, &p.To) + offset := c.regoff(&p.To) reg := p.To.Reg if reg == 0 { reg = o.param @@ -3810,7 +3821,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 98: // load multiple rstart := p.Reg rend := p.To.Reg - offset := regoff(ctxt, &p.From) + offset := c.regoff(&p.From) reg := p.From.Reg if reg == 0 { reg = o.param @@ -3836,11 +3847,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 99: // interlocked load and op if p.To.Index != 0 { - ctxt.Diag("cannot use indexed address") + c.ctxt.Diag("cannot use indexed address") } - offset := regoff(ctxt, &p.To) + offset := c.regoff(&p.To) if offset < -DISP20/2 || offset >= DISP20/2 { - ctxt.Diag("%v does not fit into 20-bit signed integer", offset) + c.ctxt.Diag("%v does not fit into 20-bit signed integer", offset) } var opcode uint32 switch p.As { @@ -3870,49 +3881,49 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 100: // VRX STORE op, m3, _ := vop(p.As) if p.From3 != nil { - m3 = uint32(vregoff(ctxt, p.From3)) + m3 = uint32(c.vregoff(p.From3)) } b2 := p.To.Reg if b2 == 0 { b2 = o.param } - d2 := uint32(vregoff(ctxt, &p.To)) + d2 := uint32(c.vregoff(&p.To)) zVRX(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm) case 101: // VRX LOAD op, m3, _ := vop(p.As) if p.From3 != nil { - m3 = uint32(vregoff(ctxt, p.From3)) + m3 = uint32(c.vregoff(p.From3)) } b2 := p.From.Reg if b2 == 0 { b2 = o.param } - d2 := uint32(vregoff(ctxt, &p.From)) + d2 := uint32(c.vregoff(&p.From)) zVRX(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm) case 102: // VRV SCATTER op, m3, _ := vop(p.As) if p.From3 != nil { - m3 = uint32(vregoff(ctxt, p.From3)) + m3 = uint32(c.vregoff(p.From3)) } b2 := p.To.Reg if b2 == 0 { b2 = o.param } - d2 := uint32(vregoff(ctxt, &p.To)) + d2 := uint32(c.vregoff(&p.To)) zVRV(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm) case 103: // VRV GATHER op, m3, _ := vop(p.As) if p.From3 != nil { - m3 = uint32(vregoff(ctxt, p.From3)) + m3 = uint32(c.vregoff(p.From3)) } b2 := p.From.Reg if b2 == 0 { b2 = o.param } - d2 := uint32(vregoff(ctxt, &p.From)) + d2 := uint32(c.vregoff(&p.From)) zVRV(op, 
uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm) case 104: // VRS SHIFT/ROTATE and LOAD GR FROM VR ELEMENT @@ -3921,12 +3932,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { if fr == 0 { fr = p.To.Reg } - bits := uint32(vregoff(ctxt, &p.From)) + bits := uint32(c.vregoff(&p.From)) zVRS(op, uint32(p.To.Reg), uint32(fr), uint32(p.From.Reg), bits, m4, asm) case 105: // VRS STORE MULTIPLE op, _, _ := vop(p.As) - offset := uint32(vregoff(ctxt, &p.To)) + offset := uint32(c.vregoff(&p.To)) reg := p.To.Reg if reg == 0 { reg = o.param @@ -3935,7 +3946,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 106: // VRS LOAD MULTIPLE op, _, _ := vop(p.As) - offset := uint32(vregoff(ctxt, &p.From)) + offset := uint32(c.vregoff(&p.From)) reg := p.From.Reg if reg == 0 { reg = o.param @@ -3944,7 +3955,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 107: // VRS STORE WITH LENGTH op, _, _ := vop(p.As) - offset := uint32(vregoff(ctxt, &p.To)) + offset := uint32(c.vregoff(&p.To)) reg := p.To.Reg if reg == 0 { reg = o.param @@ -3953,7 +3964,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 108: // VRS LOAD WITH LENGTH op, _, _ := vop(p.As) - offset := uint32(vregoff(ctxt, &p.From)) + offset := uint32(c.vregoff(&p.From)) reg := p.From.Reg if reg == 0 { reg = o.param @@ -3962,7 +3973,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 109: // VRI-a op, m3, _ := vop(p.As) - i2 := uint32(vregoff(ctxt, &p.From)) + i2 := uint32(c.vregoff(&p.From)) switch p.As { case AVZERO: i2 = 0 @@ -3970,30 +3981,30 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { i2 = 0xffff } if p.From3 != nil { - m3 = uint32(vregoff(ctxt, p.From3)) + m3 = uint32(c.vregoff(p.From3)) } zVRIa(op, uint32(p.To.Reg), i2, m3, asm) case 110: op, m4, _ := vop(p.As) - i2 := uint32(vregoff(ctxt, p.From3)) - i3 := uint32(vregoff(ctxt, &p.From)) + i2 := uint32(c.vregoff(p.From3)) + i3 := uint32(c.vregoff(&p.From)) zVRIb(op, uint32(p.To.Reg), i2, i3, m4, asm) case 111: op, m4, _ := vop(p.As) - i2 := uint32(vregoff(ctxt, &p.From)) + i2 := uint32(c.vregoff(&p.From)) zVRIc(op, uint32(p.To.Reg), uint32(p.Reg), i2, m4, asm) case 112: op, m5, _ := vop(p.As) - i4 := uint32(vregoff(ctxt, p.From3)) + i4 := uint32(c.vregoff(p.From3)) zVRId(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), i4, m5, asm) case 113: op, m4, _ := vop(p.As) m5 := singleElementMask(p.As) - i3 := uint32(vregoff(ctxt, &p.From)) + i3 := uint32(c.vregoff(&p.From)) zVRIe(op, uint32(p.To.Reg), uint32(p.Reg), i3, m5, m4, asm) case 114: // VRR-a @@ -4054,25 +4065,25 @@ func asmout(ctxt *obj.Link, p *obj.Prog, asm *[]byte) { case 123: // VPDI $m4, V2, V3, V1 op, _, _ := vop(p.As) - m4 := regoff(ctxt, p.From3) + m4 := c.regoff(p.From3) zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), 0, 0, uint32(m4), asm) } } -func vregoff(ctxt *obj.Link, a *obj.Addr) int64 { - ctxt.Instoffset = 0 +func (c *ctxtz) vregoff(a *obj.Addr) int64 { + c.instoffset = 0 if a != nil { - aclass(ctxt, a) + c.aclass(a) } - return ctxt.Instoffset + return c.instoffset } -func regoff(ctxt *obj.Link, a *obj.Addr) int32 { - return int32(vregoff(ctxt, a)) +func (c *ctxtz) regoff(a *obj.Addr) int32 { + return int32(c.vregoff(a)) } // zopload returns the RXY op for the given load -func zopload(ctxt *obj.Link, a obj.As) uint32 { +func (c *ctxtz) zopload(a obj.As) uint32 { switch a { // fixed point load case AMOVD: @@ -4105,12 +4116,12 @@ func zopload(ctxt *obj.Link, a obj.As) uint32 { return op_LRVH } - ctxt.Diag("unknown 
store opcode %v", a) + c.ctxt.Diag("unknown store opcode %v", a) return 0 } // zopstore returns the RXY op for the given store -func zopstore(ctxt *obj.Link, a obj.As) uint32 { +func (c *ctxtz) zopstore(a obj.As) uint32 { switch a { // fixed point store case AMOVD: @@ -4137,12 +4148,12 @@ func zopstore(ctxt *obj.Link, a obj.As) uint32 { return op_STRVH } - ctxt.Diag("unknown store opcode %v", a) + c.ctxt.Diag("unknown store opcode %v", a) return 0 } // zoprre returns the RRE op for the given a -func zoprre(ctxt *obj.Link, a obj.As) uint32 { +func (c *ctxtz) zoprre(a obj.As) uint32 { switch a { case ACMP: return op_CGR @@ -4155,24 +4166,24 @@ func zoprre(ctxt *obj.Link, a obj.As) uint32 { case ACEBR: return op_CEBR } - ctxt.Diag("unknown rre opcode %v", a) + c.ctxt.Diag("unknown rre opcode %v", a) return 0 } // zoprr returns the RR op for the given a -func zoprr(ctxt *obj.Link, a obj.As) uint32 { +func (c *ctxtz) zoprr(a obj.As) uint32 { switch a { case ACMPW: return op_CR case ACMPWU: return op_CLR } - ctxt.Diag("unknown rr opcode %v", a) + c.ctxt.Diag("unknown rr opcode %v", a) return 0 } // zopril returns the RIL op for the given a -func zopril(ctxt *obj.Link, a obj.As) uint32 { +func (c *ctxtz) zopril(a obj.As) uint32 { switch a { case ACMP: return op_CGFI @@ -4183,7 +4194,7 @@ func zopril(ctxt *obj.Link, a obj.As) uint32 { case ACMPWU: return op_CLFI } - ctxt.Diag("unknown ril opcode %v", a) + c.ctxt.Diag("unknown ril opcode %v", a) return 0 } diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index b39d94e9d0d..8b86dbb4045 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -39,13 +39,11 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.From.Class = 0 p.To.Class = 0 + c := ctxtz{ctxt: ctxt, newprog: newprog} + // Rewrite BR/BL to symbol as TYPE_BRANCH. switch p.As { - case ABR, - ABL, - obj.ARET, - obj.ADUFFZERO, - obj.ADUFFCOPY: + case ABR, ABL, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } @@ -107,13 +105,13 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { } } - if ctxt.Flag_dynlink { - rewriteToUseGot(ctxt, p, newprog) + if c.ctxt.Flag_dynlink { + c.rewriteToUseGot(p) } } // Rewrite p, if necessary, to access global data via the global offset table. -func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { +func (c *ctxtz) rewriteToUseGot(p *obj.Prog) { // At the moment EXRL instructions are not emitted by the compiler and only reference local symbols in // assembly code. 
 	// assembly code.
 	if p.As == AEXRL {
 		return
 	}
@@ -127,13 +125,13 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		// MOVD $sym, Rx becomes MOVD sym@GOT, Rx
 		// MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx
 		if p.To.Type != obj.TYPE_REG || p.As != AMOVD {
-			ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
+			c.ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
 		}
 		p.From.Type = obj.TYPE_MEM
 		p.From.Name = obj.NAME_GOTREF
 		q := p
 		if p.From.Offset != 0 {
-			q = obj.Appendp(p, newprog)
+			q = obj.Appendp(p, c.newprog)
 			q.As = AADD
 			q.From.Type = obj.TYPE_CONST
 			q.From.Offset = p.From.Offset
@@ -142,7 +140,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		}
 	}
 	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
+		c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
 	}
 	var source *obj.Addr
 	// MOVD sym, Ry becomes MOVD sym@GOT, REGTMP; MOVD (REGTMP), Ry
@@ -150,7 +148,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 	// An addition may be inserted between the two MOVs if there is an offset.
 	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
 		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
+			c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
 		}
 		source = &p.From
 	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
@@ -165,10 +163,10 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 		return
 	}
 	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
+		c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
 	}
-	p1 := obj.Appendp(p, newprog)
-	p2 := obj.Appendp(p1, newprog)
+	p1 := obj.Appendp(p, c.newprog)
+	p2 := obj.Appendp(p1, c.newprog)
 
 	p1.As = AMOVD
 	p1.From.Type = obj.TYPE_MEM
@@ -196,13 +194,13 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
 
 func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	// TODO(minux): add morestack short-cuts with small fixed frame-size.
-	ctxt.Cursym = cursym
-
 	if cursym.Text == nil || cursym.Text.Link == nil {
 		return
 	}
 
-	p := cursym.Text
+	c := ctxtz{ctxt: ctxt, cursym: cursym, newprog: newprog}
+
+	p := c.cursym.Text
 	textstksiz := p.To.Offset
 	if textstksiz == -8 {
 		// Compatibility hack.
@@ -210,16 +208,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { textstksiz = 0 } if textstksiz%8 != 0 { - ctxt.Diag("frame size %d not a multiple of 8", textstksiz) + c.ctxt.Diag("frame size %d not a multiple of 8", textstksiz) } if p.From3.Offset&obj.NOFRAME != 0 { if textstksiz != 0 { - ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) + c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) } } - cursym.Args = p.To.Val.(int32) - cursym.Locals = int32(textstksiz) + c.cursym.Args = p.To.Val.(int32) + c.cursym.Locals = int32(textstksiz) /* * find leaf subroutines @@ -228,7 +226,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { */ var q *obj.Prog - for p := cursym.Text; p != nil; p = p.Link { + for p := c.cursym.Text; p != nil; p = p.Link { switch p.As { case obj.ATEXT: q = p @@ -236,7 +234,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { case ABL, ABCL: q = p - cursym.Text.Mark &^= LEAF + c.cursym.Text.Mark &^= LEAF fallthrough case ABC, @@ -287,7 +285,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { var pPre *obj.Prog var pPreempt *obj.Prog wasSplit := false - for p := cursym.Text; p != nil; p = p.Link { + for p := c.cursym.Text; p != nil; p = p.Link { pLast = p switch p.As { case obj.ATEXT: @@ -301,7 +299,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if p.From3.Offset&obj.NOFRAME == 0 { // If there is a stack frame at all, it includes // space to save the LR. - autosize += int32(ctxt.FixedFrameSize()) + autosize += int32(c.ctxt.FixedFrameSize()) } if p.Mark&LEAF != 0 && autosize < obj.StackSmall { @@ -315,7 +313,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q := p if p.From3.Offset&obj.NOSPLIT == 0 { - p, pPreempt = stacksplitPre(ctxt, p, newprog, autosize) // emit pre part of split check + p, pPreempt = c.stacksplitPre(p, autosize) // emit pre part of split check pPre = p wasSplit = true //need post part of split } @@ -326,7 +324,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // Store link register before decrementing SP, so if a signal comes // during the execution of the function prologue, the traceback // code will not see a half-updated stack frame. - q = obj.Appendp(p, newprog) + q = obj.Appendp(p, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR @@ -334,7 +332,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Reg = REGSP q.To.Offset = int64(-autosize) - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_ADDR q.From.Offset = int64(-autosize) @@ -342,19 +340,19 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = autosize - } else if cursym.Text.Mark&LEAF == 0 { + } else if c.cursym.Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // (e.g. gogo) are not identified as leaves but still have // no frame. 
- cursym.Text.Mark |= LEAF + c.cursym.Text.Mark |= LEAF } - if cursym.Text.Mark&LEAF != 0 { - cursym.Set(obj.AttrLeaf, true) + if c.cursym.Text.Mark&LEAF != 0 { + c.cursym.Set(obj.AttrLeaf, true) break } - if cursym.Text.From3.Offset&obj.WRAPPER != 0 { + if c.cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R3 @@ -372,28 +370,28 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes. - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG - q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic + q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R3 q.To.Type = obj.TYPE_CONST q.To.Offset = 0 - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH p1 := q - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R3 @@ -401,35 +399,35 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST - q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() + q.From.Offset = int64(autosize) + c.ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = ABNE q.To.Type = obj.TYPE_BRANCH p2 := q - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST - q.From.Offset = ctxt.FixedFrameSize() + q.From.Offset = c.ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R6 - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R6 @@ -437,7 +435,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Reg = REG_R3 q.To.Offset = 0 // Panic.argp - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = obj.ANOP p1.Pcond = q @@ -447,7 +445,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { case obj.ARET: retTarget := p.To.Sym - if cursym.Text.Mark&LEAF != 0 { + if c.cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = ABR p.From = obj.Addr{} @@ -469,7 +467,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.To.Reg = REGSP p.Spadj = -autosize - q = obj.Appendp(p, newprog) + q = obj.Appendp(p, c.newprog) q.As = ABR q.From = obj.Addr{} q.To.Type = obj.TYPE_REG @@ -489,7 +487,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = p if autosize != 0 { - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) @@ -498,7 +496,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.Spadj = -autosize } - q = obj.Appendp(q, newprog) + q = obj.Appendp(q, c.newprog) q.As = ABR 
q.From = obj.Addr{} if retTarget == nil { @@ -518,22 +516,22 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } } if wasSplit { - stacksplitPost(ctxt, pLast, pPre, pPreempt, newprog, autosize) // emit post part of split check + c.stacksplitPost(pLast, pPre, pPreempt, autosize) // emit post part of split check } } -func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize int32) (*obj.Prog, *obj.Prog) { +func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) { var q *obj.Prog // MOVD g_stackguard(g), R3 - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGG - p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 - if ctxt.Cursym.CFunc() { - p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 + p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0 + if c.cursym.CFunc() { + p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 @@ -548,7 +546,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize // q1: BLT done - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) //q1 = p p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 @@ -571,7 +569,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize // large stack: SP-framesize < stackguard-StackSmall // ADD $-(framesize-StackSmall), SP, R4 // CMP stackguard, R4 - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST @@ -580,7 +578,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.Reg = REG_R4 @@ -603,7 +601,7 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize // SUB R3, R4 // MOVD $(framesize+(StackGuard-StackSmall)), TEMP // CMPUBGE TEMP, R4 - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = ACMP p.From.Type = obj.TYPE_REG @@ -611,12 +609,12 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize p.To.Type = obj.TYPE_CONST p.To.Offset = obj.StackPreempt - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) q = p p.As = ABEQ p.To.Type = obj.TYPE_BRANCH - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = obj.StackGuard @@ -624,21 +622,21 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = ASUB p.From.Type = obj.TYPE_REG p.From.Reg = REG_R3 p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.From.Type = obj.TYPE_REG p.From.Reg = REGTMP p.Reg = REG_R4 @@ -649,16 +647,16 @@ func stacksplitPre(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize return p, q } -func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, newprog obj.ProgAlloc, framesize int32) *obj.Prog { +func (c *ctxtz) stacksplitPost(p *obj.Prog, pPre *obj.Prog, pPreempt 
*obj.Prog, framesize int32) *obj.Prog { // Now we are at the end of the function, but logically // we are still in function prologue. We need to fix the // SP data and PCDATA. - spfix := obj.Appendp(p, newprog) + spfix := obj.Appendp(p, c.newprog) spfix.As = obj.ANOP spfix.Spadj = -framesize - pcdata := obj.Appendp(spfix, newprog) - pcdata.Pos = ctxt.Cursym.Text.Pos + pcdata := obj.Appendp(spfix, c.newprog) + pcdata.Pos = c.cursym.Text.Pos pcdata.As = obj.APCDATA pcdata.From.Type = obj.TYPE_CONST pcdata.From.Offset = obj.PCDATA_StackMapIndex @@ -666,7 +664,7 @@ func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.P pcdata.To.Offset = -1 // pcdata starts at -1 at function entry // MOVD LR, R5 - p = obj.Appendp(pcdata, newprog) + p = obj.Appendp(pcdata, c.newprog) pPre.Pcond = p p.As = AMOVD p.From.Type = obj.TYPE_REG @@ -678,24 +676,24 @@ func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.P } // BL runtime.morestack(SB) - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = ABL p.To.Type = obj.TYPE_BRANCH - if ctxt.Cursym.CFunc() { - p.To.Sym = ctxt.Lookup("runtime.morestackc", 0) - } else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 { - p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt", 0) + if c.cursym.CFunc() { + p.To.Sym = c.ctxt.Lookup("runtime.morestackc", 0) + } else if c.cursym.Text.From3.Offset&obj.NEEDCTXT == 0 { + p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt", 0) } else { - p.To.Sym = ctxt.Lookup("runtime.morestack", 0) + p.To.Sym = c.ctxt.Lookup("runtime.morestack", 0) } // BR start - p = obj.Appendp(p, newprog) + p = obj.Appendp(p, c.newprog) p.As = ABR p.To.Type = obj.TYPE_BRANCH - p.Pcond = ctxt.Cursym.Text.Link + p.Pcond = c.cursym.Text.Link return p }
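
The whole change applies a single pattern: per-function scratch state (the Cursym, Autosize, Instoffset, and Pc writes on the s390x paths, plus the deleted Printp field) moves off the shared obj.Link and into a ctxtz value that spanz, progedit, and preprocess each construct for the function they are working on. Below is a minimal, self-contained sketch of why that makes concurrent assembly safe. The Link, Func, and ctx types and the assemble method here are toy stand-ins invented for illustration, not the real cmd/internal/obj API:

```go
package main

import (
	"fmt"
	"sync"
)

// Link plays the role of obj.Link after this change: configuration
// that is only read during assembly, so sharing it is safe.
type Link struct {
	Arch string
}

// Func stands in for obj.LSym: one function to assemble.
type Func struct {
	Name string
	N    int // number of instructions
}

// ctx mirrors ctxtz: the scratch fields that used to live on the
// shared context. Every call to assemble gets a fresh value, so
// concurrent assembly of different functions shares no mutable state.
type ctx struct {
	ctxt   *Link // shared, read-only
	cursym *Func // this goroutine's function
	pc     int64
}

func (c *ctx) assemble() {
	for i := 0; i < c.cursym.N; i++ {
		c.pc += 2 // pretend every instruction encodes to 2 bytes
	}
	fmt.Printf("%s: %s ends at pc=%d\n", c.ctxt.Arch, c.cursym.Name, c.pc)
}

func main() {
	link := &Link{Arch: "s390x"}
	funcs := []*Func{{"a", 3}, {"b", 7}, {"c", 1}}

	var wg sync.WaitGroup
	for _, fn := range funcs {
		wg.Add(1)
		go func(fn *Func) {
			defer wg.Done()
			c := ctx{ctxt: link, cursym: fn} // fresh per-function state
			c.assemble()
		}(fn)
	}
	wg.Wait()
}
```

With the old layout, two goroutines running spanz would have raced on fields like ctxt.Pc and ctxt.Cursym; with a fresh per-function context, the only value they share is configuration that is never written during assembly.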