[dev.regabi] cmd/compile: split out package objw [generated]

Object file writing routines are used not just at the end
of the compilation but also during static data layout in walk.
Split them into their own package.
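
To make the move concrete, here is a sketch (not part of this CL; the
helper and its arguments are hypothetical) of static data layout against
the relocated API, mirroring what genhash does in the diffs below:

	// writeFuncClosure lays out a one-word closure {&fn} with the
	// renamed writers: objw.SymPtr was dsymptr, objw.Global was ggloblsym.
	func writeFuncClosure(closure, fn *obj.LSym) {
		ot := 0
		ot = objw.SymPtr(closure, ot, fn, 0)
		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
	}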

[git-generate]

cd src/cmd/compile/internal/gc
rf '
	# Move bit vector to new package bitvec
	mv bvec.n bvec.N
	mv bvec.b bvec.B
	mv bvec BitVec
	mv bvalloc New
	mv bvbulkalloc NewBulk
	mv bulkBvec.next bulkBvec.Next
	mv bulkBvec Bulk
	mv H0 h0
	mv Hp hp

	# Leave bvecSet and bitmap hashes behind - not needed as broadly.
	mv bvecSet.extractUniqe bvecSet.extractUnique
	mv h0 bvecSet bvecSet.grow bvecSet.add \
		bvecSet.extractUnique hashbitmap bvset.go

	mv bv.go cmd/compile/internal/bitvec

	ex . ../arm ../arm64 ../mips ../mips64 ../ppc64 ../s390x ../riscv64 {
		import "cmd/internal/obj"
		var a *obj.Addr
		var i int64
		Addrconst(a, i) -> a.SetConst(i)
		var p, to *obj.Prog
		Patch(p, to) -> p.To.SetTarget(to)
	}
	rm Addrconst Patch

	# Move object-writing API to new package objw
	mv duint8 Objw_Uint8
	mv duint16 Objw_Uint16
	mv duint32 Objw_Uint32
	mv duintptr Objw_Uintptr
	mv duintxx Objw_UintN
	mv dsymptr Objw_SymPtr
	mv dsymptrOff Objw_SymPtrOff
	mv dsymptrWeakOff Objw_SymPtrWeakOff
	mv ggloblsym Objw_Global
	mv dbvec Objw_BitVec
	mv newProgs NewProgs
	mv Progs.clearp Progs.Clear
	mv Progs.settext Progs.SetText
	mv Progs.next Progs.Next
	mv Progs.pc Progs.PC
	mv Progs.pos Progs.Pos
	mv Progs.curfn Progs.CurFunc
	mv Progs.progcache Progs.Cache
	mv Progs.cacheidx Progs.CacheIndex
	mv Progs.nextLive Progs.NextLive
	mv Progs.prevLive Progs.PrevLive
	mv Progs.Appendpp Progs.Append
	mv LivenessIndex.stackMapIndex LivenessIndex.StackMapIndex
	mv LivenessIndex.isUnsafePoint LivenessIndex.IsUnsafePoint

	mv Objw_Uint8 Objw_Uint16 Objw_Uint32 Objw_Uintptr Objw_UintN \
		Objw_SymPtr Objw_SymPtrOff Objw_SymPtrWeakOff Objw_Global \
		Objw_BitVec \
		objw.go
	mv sharedProgArray NewProgs Progs \
		LivenessIndex StackMapDontCare \
		LivenessDontCare LivenessIndex.StackMapValid \
		Progs.NewProg Progs.Flush Progs.Free Progs.Prog Progs.Clear Progs.Append Progs.SetText \
		prog.go
	mv prog.go objw.go cmd/compile/internal/objw

	# Move ggloblnod to obj with the rest of the non-objw higher-level writing.
	mv ggloblnod obj.go
'

cd ../objw
rf '
	mv Objw_Uint8 Uint8
	mv Objw_Uint16 Uint16
	mv Objw_Uint32 Uint32
	mv Objw_Uintptr Uintptr
	mv Objw_UintN UintN
	mv Objw_SymPtr SymPtr
	mv Objw_SymPtrOff SymPtrOff
	mv Objw_SymPtrWeakOff SymPtrWeakOff
	mv Objw_Global Global
	mv Objw_BitVec BitVec
'
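
For reference, the ex block in the first script rewrites call sites of the
deleted helpers into direct calls on obj.Addr and obj.Prog. A minimal
before/after sketch (p and p1 are placeholder Progs, as in the arm diffs
below; the constant is made up):

	// before
	Addrconst(&p.From, 4)
	Patch(p, p1)
	// after
	p.From.SetConst(4)
	p.To.SetTarget(p1)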

Change-Id: I2b87085aa788564fb322e9c55bddd73347b4d5fd
Reviewed-on: https://go-review.googlesource.com/c/go/+/279310
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Russ Cox <rsc@golang.org>	2020-12-23 00:46:27 -05:00
parent 575fd6ff0a
commit 0ced54062e
33 changed files with 1008 additions and 946 deletions

@@ -6,8 +6,8 @@ package amd64
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
@@ -54,7 +54,7 @@ func dzDI(b int64) int64 {
 	return -dzClearStep * (dzBlockLen - tailSteps)
 }
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	const (
 		ax = 1 << iota
 		x0
@@ -70,61 +70,61 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
 			base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
 		}
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
 		off += int64(types.PtrSize)
 		cnt -= int64(types.PtrSize)
 	}
 	if cnt == 8 {
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
+		p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
 	} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
 		if *state&x0 == 0 {
-			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
 		for i := int64(0); i < cnt/16; i++ {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
 		}
 		if cnt%16 != 0 {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
 	} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
 		if *state&x0 == 0 {
-			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
+			p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
 		}
-		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
 		p.To.Sym = ir.Syms.Duffzero
 		if cnt%16 != 0 {
-			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+			p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
 		}
 	} else {
 		if *state&ax == 0 {
-			p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+			p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 			*state |= ax
 		}
-		p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
-		p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+		p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
 	}
 	return p
 }
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// This is a hardware nop (1-byte 0x90) instruction,
 	// even though we describe it as an explicit XCHGL here.
 	// Particularly, this does not zero the high 32 bits

@@ -5,51 +5,51 @@
 package arm
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm"
 )
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if *r0 == 0 {
-		p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+		p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
 		*r0 = 1
 	}
 	if cnt < int64(4*types.PtrSize) {
 		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+			p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
 		}
 	} else if cnt <= int64(128*types.PtrSize) {
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
 	} else {
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
-		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+		p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
 		p.Reg = arm.REG_R1
-		p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+		p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
 		p1 := p
 		p.Scond |= arm.C_PBIT
-		p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm.REG_R2
-		p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 	return p
 }
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(arm.AAND)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = arm.REG_R0

@@ -779,7 +779,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p2.Reg = arm.REG_R1
 		p3 := s.Prog(arm.ABLE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARMLoweredMove:
 		// MOVW.P	4(R1), Rtmp
 		// MOVW.P	Rtmp, 4(R2)
@@ -820,7 +820,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.Reg = arm.REG_R1
 		p4 := s.Prog(arm.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 	case ssa.OpARMEqual,
 		ssa.OpARMNotEqual,
 		ssa.OpARMLessThan,

@@ -5,8 +5,8 @@
 package arm64
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/arm64"
@@ -24,24 +24,24 @@ func padframe(frame int64) int64 {
 	return frame
 }
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if cnt < int64(4*types.PtrSize) {
 		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+			p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
 		}
 	} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
 		if cnt%(2*int64(types.PtrSize)) != 0 {
-			p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+			p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
 			off += int64(types.PtrSize)
 			cnt -= int64(types.PtrSize)
 		}
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
 		p.Reg = arm64.REG_R20
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
@@ -50,26 +50,26 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 		// We are at the function entry, where no register is live, so it is okay to clobber
 		// other registers
 		const rtmp = arm64.REG_R20
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
 		p.Reg = arm64.REGRT1
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
-		p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+		p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
 		p.Reg = arm64.REGRT1
-		p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
+		p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
 		p.Scond = arm64.C_XPRE
 		p1 := p
-		p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
 		p.Reg = arm64.REGRT2
-		p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p.To.SetTarget(p1)
 	}
 	return p
 }
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(arm64.AHINT)
 	p.From.Type = obj.TYPE_CONST
 	return p

@@ -582,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p2.From.Type = obj.TYPE_REG
 		p2.From.Reg = arm64.REGTMP
 		p2.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p2, p)
+		p2.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicExchange64Variant,
 		ssa.OpARM64LoweredAtomicExchange32Variant:
 		swap := arm64.ASWPALD
@@ -636,7 +636,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = arm64.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicAdd64Variant,
 		ssa.OpARM64LoweredAtomicAdd32Variant:
 		// LDADDAL	Rarg1, (Rarg0), Rout
@@ -700,13 +700,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p4.From.Type = obj.TYPE_REG
 		p4.From.Reg = arm64.REGTMP
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 		p5 := s.Prog(arm64.ACSET)
 		p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
 		p5.From.Reg = arm64.COND_EQ
 		p5.To.Type = obj.TYPE_REG
 		p5.To.Reg = out
-		gc.Patch(p2, p5)
+		p2.To.SetTarget(p5)
 	case ssa.OpARM64LoweredAtomicCas64Variant,
 		ssa.OpARM64LoweredAtomicCas32Variant:
 		// Rarg0: ptr
@@ -794,7 +794,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = arm64.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64LoweredAtomicAnd8Variant,
 		ssa.OpARM64LoweredAtomicAnd32Variant:
 		atomic_clear := arm64.ALDCLRALW
@@ -982,7 +982,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p2.Reg = arm64.REG_R16
 		p3 := s.Prog(arm64.ABLE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 	case ssa.OpARM64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
@@ -1015,7 +1015,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.Reg = arm64.REG_R16
 		p4 := s.Prog(arm64.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 	case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
 		s.Call(v)
 	case ssa.OpARM64LoweredWB:

@@ -0,0 +1,190 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bitvec

import (
	"math/bits"

	"cmd/compile/internal/base"
)

const (
	wordBits  = 32
	wordMask  = wordBits - 1
	wordShift = 5
)

// A BitVec is a bit vector.
type BitVec struct {
	N int32    // number of bits in vector
	B []uint32 // words holding bits
}

func New(n int32) BitVec {
	nword := (n + wordBits - 1) / wordBits
	return BitVec{n, make([]uint32, nword)}
}

type Bulk struct {
	words []uint32
	nbit  int32
	nword int32
}

func NewBulk(nbit int32, count int32) Bulk {
	nword := (nbit + wordBits - 1) / wordBits
	size := int64(nword) * int64(count)
	if int64(int32(size*4)) != size*4 {
		base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
	}
	return Bulk{
		words: make([]uint32, size),
		nbit:  nbit,
		nword: nword,
	}
}

func (b *Bulk) Next() BitVec {
	out := BitVec{b.nbit, b.words[:b.nword]}
	b.words = b.words[b.nword:]
	return out
}

func (bv1 BitVec) Eq(bv2 BitVec) bool {
	if bv1.N != bv2.N {
		base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
	}
	for i, x := range bv1.B {
		if x != bv2.B[i] {
			return false
		}
	}
	return true
}

func (dst BitVec) Copy(src BitVec) {
	copy(dst.B, src.B)
}

func (bv BitVec) Get(i int32) bool {
	if i < 0 || i >= bv.N {
		base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
	}
	mask := uint32(1 << uint(i%wordBits))
	return bv.B[i>>wordShift]&mask != 0
}

func (bv BitVec) Set(i int32) {
	if i < 0 || i >= bv.N {
		base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.B[i/wordBits] |= mask
}

func (bv BitVec) Unset(i int32) {
	if i < 0 || i >= bv.N {
		base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.B[i/wordBits] &^= mask
}

// Next returns the smallest index >= i for which Get(i) == true.
// If there is no such index, Next returns -1.
func (bv BitVec) Next(i int32) int32 {
	if i >= bv.N {
		return -1
	}

	// Jump i ahead to next word with bits.
	if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
		i &^= wordMask
		i += wordBits
		for i < bv.N && bv.B[i>>wordShift] == 0 {
			i += wordBits
		}
	}

	if i >= bv.N {
		return -1
	}

	// Find 1 bit.
	w := bv.B[i>>wordShift] >> uint(i&wordMask)
	i += int32(bits.TrailingZeros32(w))

	return i
}

func (bv BitVec) IsEmpty() bool {
	for _, x := range bv.B {
		if x != 0 {
			return false
		}
	}
	return true
}

func (bv BitVec) Not() {
	for i, x := range bv.B {
		bv.B[i] = ^x
	}
}

// union
func (dst BitVec) Or(src1, src2 BitVec) {
	if len(src1.B) == 0 {
		return
	}
	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop

	for i, x := range src1.B {
		dst.B[i] = x | src2.B[i]
	}
}

// intersection
func (dst BitVec) And(src1, src2 BitVec) {
	if len(src1.B) == 0 {
		return
	}
	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop

	for i, x := range src1.B {
		dst.B[i] = x & src2.B[i]
	}
}

// difference
func (dst BitVec) AndNot(src1, src2 BitVec) {
	if len(src1.B) == 0 {
		return
	}
	_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop

	for i, x := range src1.B {
		dst.B[i] = x &^ src2.B[i]
	}
}

func (bv BitVec) String() string {
	s := make([]byte, 2+bv.N)
	copy(s, "#*")
	for i := int32(0); i < bv.N; i++ {
		ch := byte('0')
		if bv.Get(i) {
			ch = '1'
		}
		s[2+i] = ch
	}
	return string(s)
}

func (bv BitVec) Clear() {
	for i := range bv.B {
		bv.B[i] = 0
	}
}
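
A short usage sketch of the renamed API (illustrative only; bitvec is
internal to cmd/compile, and the values here are made up). Liveness-style
code allocates many same-length vectors from one Bulk:

	bulk := bitvec.NewBulk(64, 4) // was bvbulkalloc
	live := bulk.Next()           // was bulkBvec.next
	live.Set(3)
	if live.Get(3) && !live.IsEmpty() {
		println(live.String()) // "#*" followed by the 64 bits
	}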

@@ -7,6 +7,7 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -110,9 +111,9 @@ func genhash(t *types.Type) *obj.LSym {
 			memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
 		}
 		ot := 0
-		ot = dsymptr(closure, ot, memhashvarlen, 0)
-		ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
-		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+		ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
+		ot = objw.Uintptr(closure, ot, uint64(t.Width)) // size encoded in closure
+		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
 		return closure
 	case types.ASPECIAL:
 		break
@@ -253,8 +254,8 @@ func genhash(t *types.Type) *obj.LSym {
 	// Build closure. It doesn't close over any variables, so
 	// it contains just the function pointer.
-	dsymptr(closure, 0, sym.Linksym(), 0)
-	ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+	objw.SymPtr(closure, 0, sym.Linksym(), 0)
+	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
 	return closure
 }
@@ -302,8 +303,8 @@ func sysClosure(name string) *obj.LSym {
 	s := typecheck.LookupRuntimeVar(name + "·f")
 	if len(s.P) == 0 {
 		f := typecheck.LookupRuntimeFunc(name)
-		dsymptr(s, 0, f, 0)
-		ggloblsym(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+		objw.SymPtr(s, 0, f, 0)
+		objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
 	}
 	return s
 }
@@ -353,9 +354,9 @@ func geneq(t *types.Type) *obj.LSym {
 			memequalvarlen = typecheck.LookupRuntimeVar("memequal_varlen") // asm func
 		}
 		ot := 0
-		ot = dsymptr(closure, ot, memequalvarlen, 0)
-		ot = duintptr(closure, ot, uint64(t.Width))
-		ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+		ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
+		ot = objw.Uintptr(closure, ot, uint64(t.Width))
+		objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
 		return closure
 	case types.ASPECIAL:
 		break
@@ -632,8 +633,8 @@ func geneq(t *types.Type) *obj.LSym {
 	typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
 	// Generate a closure which points at the function we just generated.
-	dsymptr(closure, 0, sym.Linksym(), 0)
-	ggloblsym(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+	objw.SymPtr(closure, 0, sym.Linksym(), 0)
+	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
 	return closure
 }

@@ -1,280 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"math/bits"

	"cmd/compile/internal/base"
)

const (
	wordBits  = 32
	wordMask  = wordBits - 1
	wordShift = 5
)

// A bvec is a bit vector.
type bvec struct {
	n int32    // number of bits in vector
	b []uint32 // words holding bits
}

func bvalloc(n int32) bvec {
	nword := (n + wordBits - 1) / wordBits
	return bvec{n, make([]uint32, nword)}
}

type bulkBvec struct {
	words []uint32
	nbit  int32
	nword int32
}

func bvbulkalloc(nbit int32, count int32) bulkBvec {
	nword := (nbit + wordBits - 1) / wordBits
	size := int64(nword) * int64(count)
	if int64(int32(size*4)) != size*4 {
		base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
	}
	return bulkBvec{
		words: make([]uint32, size),
		nbit:  nbit,
		nword: nword,
	}
}

func (b *bulkBvec) next() bvec {
	out := bvec{b.nbit, b.words[:b.nword]}
	b.words = b.words[b.nword:]
	return out
}

func (bv1 bvec) Eq(bv2 bvec) bool {
	if bv1.n != bv2.n {
		base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
	}
	for i, x := range bv1.b {
		if x != bv2.b[i] {
			return false
		}
	}
	return true
}

func (dst bvec) Copy(src bvec) {
	copy(dst.b, src.b)
}

func (bv bvec) Get(i int32) bool {
	if i < 0 || i >= bv.n {
		base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
	}
	mask := uint32(1 << uint(i%wordBits))
	return bv.b[i>>wordShift]&mask != 0
}

func (bv bvec) Set(i int32) {
	if i < 0 || i >= bv.n {
		base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.b[i/wordBits] |= mask
}

func (bv bvec) Unset(i int32) {
	if i < 0 || i >= bv.n {
		base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
	}
	mask := uint32(1 << uint(i%wordBits))
	bv.b[i/wordBits] &^= mask
}

// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
// If there is no such index, bvnext returns -1.
func (bv bvec) Next(i int32) int32 {
	if i >= bv.n {
		return -1
	}

	// Jump i ahead to next word with bits.
	if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
		i &^= wordMask
		i += wordBits
		for i < bv.n && bv.b[i>>wordShift] == 0 {
			i += wordBits
		}
	}

	if i >= bv.n {
		return -1
	}

	// Find 1 bit.
	w := bv.b[i>>wordShift] >> uint(i&wordMask)
	i += int32(bits.TrailingZeros32(w))

	return i
}

func (bv bvec) IsEmpty() bool {
	for _, x := range bv.b {
		if x != 0 {
			return false
		}
	}
	return true
}

func (bv bvec) Not() {
	for i, x := range bv.b {
		bv.b[i] = ^x
	}
}

// union
func (dst bvec) Or(src1, src2 bvec) {
	if len(src1.b) == 0 {
		return
	}
	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop

	for i, x := range src1.b {
		dst.b[i] = x | src2.b[i]
	}
}

// intersection
func (dst bvec) And(src1, src2 bvec) {
	if len(src1.b) == 0 {
		return
	}
	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop

	for i, x := range src1.b {
		dst.b[i] = x & src2.b[i]
	}
}

// difference
func (dst bvec) AndNot(src1, src2 bvec) {
	if len(src1.b) == 0 {
		return
	}
	_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop

	for i, x := range src1.b {
		dst.b[i] = x &^ src2.b[i]
	}
}

func (bv bvec) String() string {
	s := make([]byte, 2+bv.n)
	copy(s, "#*")
	for i := int32(0); i < bv.n; i++ {
		ch := byte('0')
		if bv.Get(i) {
			ch = '1'
		}
		s[2+i] = ch
	}
	return string(s)
}

func (bv bvec) Clear() {
	for i := range bv.b {
		bv.b[i] = 0
	}
}

// FNV-1 hash function constants.
const (
	H0 = 2166136261
	Hp = 16777619
)

func hashbitmap(h uint32, bv bvec) uint32 {
	n := int((bv.n + 31) / 32)
	for i := 0; i < n; i++ {
		w := bv.b[i]
		h = (h * Hp) ^ (w & 0xff)
		h = (h * Hp) ^ ((w >> 8) & 0xff)
		h = (h * Hp) ^ ((w >> 16) & 0xff)
		h = (h * Hp) ^ ((w >> 24) & 0xff)
	}
	return h
}

// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
	index []int  // hash -> uniq index. -1 indicates empty slot.
	uniq  []bvec // unique bvecs, in insertion order
}

func (m *bvecSet) grow() {
	// Allocate new index.
	n := len(m.index) * 2
	if n == 0 {
		n = 32
	}
	newIndex := make([]int, n)
	for i := range newIndex {
		newIndex[i] = -1
	}

	// Rehash into newIndex.
	for i, bv := range m.uniq {
		h := hashbitmap(H0, bv) % uint32(len(newIndex))
		for {
			j := newIndex[h]
			if j < 0 {
				newIndex[h] = i
				break
			}
			h++
			if h == uint32(len(newIndex)) {
				h = 0
			}
		}
	}
	m.index = newIndex
}

// add adds bv to the set and returns its index in m.extractUniqe.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bvec) int {
	if len(m.uniq)*4 >= len(m.index) {
		m.grow()
	}

	index := m.index
	h := hashbitmap(H0, bv) % uint32(len(index))
	for {
		j := index[h]
		if j < 0 {
			// New bvec.
			index[h] = len(m.uniq)
			m.uniq = append(m.uniq, bv)
			return len(m.uniq) - 1
		}
		jlive := m.uniq[j]
		if bv.Eq(jlive) {
			// Existing bvec.
			return j
		}

		h++
		if h == uint32(len(index)) {
			h = 0
		}
	}
}

// extractUniqe returns this slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUniqe() []bvec {
	return m.uniq
}

@@ -0,0 +1,97 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import "cmd/compile/internal/bitvec"

// FNV-1 hash function constants.
const (
	h0 = 2166136261
	hp = 16777619
)

// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
	index []int           // hash -> uniq index. -1 indicates empty slot.
	uniq  []bitvec.BitVec // unique bvecs, in insertion order
}

func (m *bvecSet) grow() {
	// Allocate new index.
	n := len(m.index) * 2
	if n == 0 {
		n = 32
	}
	newIndex := make([]int, n)
	for i := range newIndex {
		newIndex[i] = -1
	}

	// Rehash into newIndex.
	for i, bv := range m.uniq {
		h := hashbitmap(h0, bv) % uint32(len(newIndex))
		for {
			j := newIndex[h]
			if j < 0 {
				newIndex[h] = i
				break
			}
			h++
			if h == uint32(len(newIndex)) {
				h = 0
			}
		}
	}
	m.index = newIndex
}

// add adds bv to the set and returns its index in m.extractUnique.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bitvec.BitVec) int {
	if len(m.uniq)*4 >= len(m.index) {
		m.grow()
	}

	index := m.index
	h := hashbitmap(h0, bv) % uint32(len(index))
	for {
		j := index[h]
		if j < 0 {
			// New bvec.
			index[h] = len(m.uniq)
			m.uniq = append(m.uniq, bv)
			return len(m.uniq) - 1
		}
		jlive := m.uniq[j]
		if bv.Eq(jlive) {
			// Existing bvec.
			return j
		}

		h++
		if h == uint32(len(index)) {
			h = 0
		}
	}
}

// extractUnique returns the slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUnique() []bitvec.BitVec {
	return m.uniq
}

func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
	n := int((bv.N + 31) / 32)
	for i := 0; i < n; i++ {
		w := bv.B[i]
		h = (h * hp) ^ (w & 0xff)
		h = (h * hp) ^ ((w >> 8) & 0xff)
		h = (h * hp) ^ ((w >> 16) & 0xff)
		h = (h * hp) ^ ((w >> 24) & 0xff)
	}
	return h
}
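
A sketch of how the set deduplicates (illustrative, with made-up values;
the liveness pass relies on this to merge identical stack maps):

	var set bvecSet
	a := bitvec.New(8)
	a.Set(1)
	b := bitvec.New(8)
	b.Set(1)
	i := set.add(a) // new vector: index 0
	j := set.add(b) // equal contents: dedups to index 0
	same := i == j  // true; len(set.extractUnique()) == 1
	_ = same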

@@ -7,6 +7,7 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/syntax"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -206,19 +207,19 @@ func initEmbed(v *ir.Name) {
 		}
 		sym := v.Sym().Linksym()
 		off := 0
-		off = dsymptr(sym, off, fsym, 0)       // data string
-		off = duintptr(sym, off, uint64(size)) // len
+		off = objw.SymPtr(sym, off, fsym, 0)       // data string
+		off = objw.Uintptr(sym, off, uint64(size)) // len
 		if kind == embedBytes {
-			duintptr(sym, off, uint64(size)) // cap for slice
+			objw.Uintptr(sym, off, uint64(size)) // cap for slice
 		}
 	case embedFiles:
 		slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
 		off := 0
 		// []files pointed at by Files
-		off = dsymptr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
-		off = duintptr(slicedata, off, uint64(len(files)))
-		off = duintptr(slicedata, off, uint64(len(files)))
+		off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
+		off = objw.Uintptr(slicedata, off, uint64(len(files)))
+		off = objw.Uintptr(slicedata, off, uint64(len(files)))
 		// embed/embed.go type file is:
 		//	name string
@@ -228,25 +229,25 @@ func initEmbed(v *ir.Name) {
 		const hashSize = 16
 		hash := make([]byte, hashSize)
 		for _, file := range files {
-			off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
-			off = duintptr(slicedata, off, uint64(len(file)))
+			off = objw.SymPtr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
+			off = objw.Uintptr(slicedata, off, uint64(len(file)))
 			if strings.HasSuffix(file, "/") {
 				// entry for directory - no data
-				off = duintptr(slicedata, off, 0)
-				off = duintptr(slicedata, off, 0)
+				off = objw.Uintptr(slicedata, off, 0)
+				off = objw.Uintptr(slicedata, off, 0)
 				off += hashSize
 			} else {
 				fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
 				if err != nil {
 					base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
 				}
-				off = dsymptr(slicedata, off, fsym, 0) // data string
-				off = duintptr(slicedata, off, uint64(size))
+				off = objw.SymPtr(slicedata, off, fsym, 0) // data string
+				off = objw.Uintptr(slicedata, off, uint64(size))
 				off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
 			}
 		}
-		ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+		objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
 		sym := v.Sym().Linksym()
-		dsymptr(sym, 0, slicedata, 0)
+		objw.SymPtr(sym, 0, slicedata, 0)
 	}
 }

@@ -5,6 +5,7 @@
 package gc
 import (
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -33,10 +34,10 @@ type Arch struct {
 	// ZeroRange zeroes a range of memory on stack. It is only inserted
 	// at function entry, and it is ok to clobber registers.
-	ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
-	Ginsnop      func(*Progs) *obj.Prog
-	Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
+	ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+	Ginsnop      func(*objw.Progs) *obj.Prog
+	Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
 	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
 	SSAMarkMoves func(*SSAGenState, *ssa.Block)

@@ -33,164 +33,14 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
-	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/objabi"
-	"cmd/internal/src"
 	"fmt"
 	"os"
 )
-var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
-// Progs accumulates Progs for a function and converts them into machine code.
-type Progs struct {
-	Text      *obj.Prog  // ATEXT Prog for this function
-	next      *obj.Prog  // next Prog
-	pc        int64      // virtual PC; count of Progs
-	pos       src.XPos   // position to use for new Progs
-	curfn     *ir.Func   // fn these Progs are for
-	progcache []obj.Prog // local progcache
-	cacheidx  int        // first free element of progcache
-	nextLive  LivenessIndex // liveness index for the next Prog
-	prevLive  LivenessIndex // last emitted liveness index
-}
-// newProgs returns a new Progs for fn.
-// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *ir.Func, worker int) *Progs {
-	pp := new(Progs)
-	if base.Ctxt.CanReuseProgs() {
-		sz := len(sharedProgArray) / base.Flag.LowerC
-		pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
-	}
-	pp.curfn = fn
-	// prime the pump
-	pp.next = pp.NewProg()
-	pp.clearp(pp.next)
-	pp.pos = fn.Pos()
-	pp.settext(fn)
-	// PCDATA tables implicitly start with index -1.
-	pp.prevLive = LivenessIndex{-1, false}
-	pp.nextLive = pp.prevLive
-	return pp
-}
-func (pp *Progs) NewProg() *obj.Prog {
-	var p *obj.Prog
-	if pp.cacheidx < len(pp.progcache) {
-		p = &pp.progcache[pp.cacheidx]
-		pp.cacheidx++
-	} else {
-		p = new(obj.Prog)
-	}
-	p.Ctxt = base.Ctxt
-	return p
-}
-// Flush converts from pp to machine code.
-func (pp *Progs) Flush() {
-	plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
-	obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
-}
-// Free clears pp and any associated resources.
-func (pp *Progs) Free() {
-	if base.Ctxt.CanReuseProgs() {
-		// Clear progs to enable GC and avoid abuse.
-		s := pp.progcache[:pp.cacheidx]
-		for i := range s {
-			s[i] = obj.Prog{}
-		}
-	}
-	// Clear pp to avoid abuse.
-	*pp = Progs{}
-}
-// Prog adds a Prog with instruction As to pp.
-func (pp *Progs) Prog(as obj.As) *obj.Prog {
-	if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
-		// Emit stack map index change.
-		idx := pp.nextLive.stackMapIndex
-		pp.prevLive.stackMapIndex = idx
-		p := pp.Prog(obj.APCDATA)
-		Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
-		Addrconst(&p.To, int64(idx))
-	}
-	if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
-		// Emit unsafe-point marker.
-		pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
-		p := pp.Prog(obj.APCDATA)
-		Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
-		if pp.nextLive.isUnsafePoint {
-			Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
-		} else {
-			Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
-		}
-	}
-	p := pp.next
-	pp.next = pp.NewProg()
-	pp.clearp(pp.next)
-	p.Link = pp.next
-	if !pp.pos.IsKnown() && base.Flag.K != 0 {
-		base.Warn("prog: unknown position (line 0)")
-	}
-	p.As = as
-	p.Pos = pp.pos
-	if pp.pos.IsStmt() == src.PosIsStmt {
-		// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
-		if ssa.LosesStmtMark(as) {
-			return p
-		}
-		pp.pos = pp.pos.WithNotStmt()
-	}
-	return p
-}
-func (pp *Progs) clearp(p *obj.Prog) {
-	obj.Nopout(p)
-	p.As = obj.AEND
-	p.Pc = pp.pc
-	pp.pc++
-}
-func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
-	q := pp.NewProg()
-	pp.clearp(q)
-	q.As = as
-	q.Pos = p.Pos
-	q.From.Type = ftype
-	q.From.Reg = freg
-	q.From.Offset = foffset
-	q.To.Type = ttype
-	q.To.Reg = treg
-	q.To.Offset = toffset
-	q.Link = p.Link
-	p.Link = q
-	return q
-}
-func (pp *Progs) settext(fn *ir.Func) {
-	if pp.Text != nil {
-		base.Fatalf("Progs.settext called twice")
-	}
-	ptxt := pp.Prog(obj.ATEXT)
-	pp.Text = ptxt
-	fn.LSym.Func().Text = ptxt
-	ptxt.From.Type = obj.TYPE_MEM
-	ptxt.From.Name = obj.NAME_EXTERN
-	ptxt.From.Sym = fn.LSym
-}
 // makeABIWrapper creates a new function that wraps a cross-ABI call
 // to "f". The wrapper is marked as an ABIWRAPPER.
 func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
@@ -426,41 +276,3 @@ func setupTextLSym(f *ir.Func, flag int) {
 	base.Ctxt.InitTextSym(f.LSym, flag)
 }
-func ggloblnod(nam ir.Node) {
-	s := nam.Sym().Linksym()
-	s.Gotype = ngotype(nam).Linksym()
-	flags := 0
-	if nam.Name().Readonly() {
-		flags = obj.RODATA
-	}
-	if nam.Type() != nil && !nam.Type().HasPointers() {
-		flags |= obj.NOPTR
-	}
-	base.Ctxt.Globl(s, nam.Type().Width, flags)
-	if nam.Name().LibfuzzerExtraCounter() {
-		s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
-	}
-	if nam.Sym().Linkname != "" {
-		// Make sure linkname'd symbol is non-package. When a symbol is
-		// both imported and linkname'd, s.Pkg may not set to "_" in
-		// types.Sym.Linksym because LSym already exists. Set it here.
-		s.Pkg = "_"
-	}
-}
-func ggloblsym(s *obj.LSym, width int32, flags int16) {
-	if flags&obj.LOCAL != 0 {
-		s.Set(obj.AttrLocal, true)
-		flags &^= obj.LOCAL
-	}
-	base.Ctxt.Globl(s, int64(width), int(flags))
-}
-func Addrconst(a *obj.Addr, v int64) {
-	a.SetConst(v)
-}
-func Patch(p *obj.Prog, to *obj.Prog) {
-	p.To.SetTarget(to)
-}

@@ -7,6 +7,7 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -100,17 +101,17 @@ func fninit() *ir.Name {
 	sym.Def = task
 	lsym := sym.Linksym()
 	ot := 0
-	ot = duintptr(lsym, ot, 0) // state: not initialized yet
-	ot = duintptr(lsym, ot, uint64(len(deps)))
-	ot = duintptr(lsym, ot, uint64(len(fns)))
+	ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet
+	ot = objw.Uintptr(lsym, ot, uint64(len(deps)))
+	ot = objw.Uintptr(lsym, ot, uint64(len(fns)))
 	for _, d := range deps {
-		ot = dsymptr(lsym, ot, d, 0)
+		ot = objw.SymPtr(lsym, ot, d, 0)
 	}
 	for _, f := range fns {
-		ot = dsymptr(lsym, ot, f, 0)
+		ot = objw.SymPtr(lsym, ot, f, 0)
 	}
 	// An initTask has pointers, but none into the Go heap.
 	// It's not quite read only, the state field must be modifiable.
-	ggloblsym(lsym, int32(ot), obj.NOPTR)
+	objw.Global(lsym, int32(ot), obj.NOPTR)
 	return task
 }

@@ -7,6 +7,7 @@ package gc
 import (
 	"cmd/compile/internal/base"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
 	"cmd/internal/bio"
@@ -160,7 +161,7 @@ func dumpdata() {
 	if zerosize > 0 {
 		zero := ir.Pkgs.Map.Lookup("zero")
-		ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+		objw.Global(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
 	}
 	addGCLocals()
@@ -281,8 +282,8 @@ func dumpfuncsyms() {
 	})
 	for _, s := range funcsyms {
 		sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
-		dsymptr(sf, 0, s.Linksym(), 0)
-		ggloblsym(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+		objw.SymPtr(sf, 0, s.Linksym(), 0)
+		objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
 	}
 }
@@ -298,53 +299,20 @@ func addGCLocals() {
 		}
 		for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
 			if gcsym != nil && !gcsym.OnList() {
-				ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+				objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
 			}
 		}
 		if x := fn.StackObjects; x != nil {
 			attr := int16(obj.RODATA)
-			ggloblsym(x, int32(len(x.P)), attr)
+			objw.Global(x, int32(len(x.P)), attr)
 			x.Set(obj.AttrStatic, true)
 		}
 		if x := fn.OpenCodedDeferInfo; x != nil {
-			ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+			objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
 		}
 	}
 }
-func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
-	if off&(wid-1) != 0 {
-		base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
-	}
-	s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
-	return off + wid
-}
-func duint8(s *obj.LSym, off int, v uint8) int {
-	return duintxx(s, off, uint64(v), 1)
-}
-func duint16(s *obj.LSym, off int, v uint16) int {
-	return duintxx(s, off, uint64(v), 2)
-}
-func duint32(s *obj.LSym, off int, v uint32) int {
-	return duintxx(s, off, uint64(v), 4)
-}
-func duintptr(s *obj.LSym, off int, v uint64) int {
-	return duintxx(s, off, v, types.PtrSize)
-}
-func dbvec(s *obj.LSym, off int, bv bvec) int {
-	// Runtime reads the bitmaps as byte arrays. Oblige.
-	for j := 0; int32(j) < bv.n; j += 8 {
-		word := bv.b[j/32]
-		off = duint8(s, off, uint8(word>>(uint(j)%32)))
-	}
-	return off
-}
 const (
 	stringSymPrefix  = "go.string."
 	stringSymPattern = ".gostring.%d.%x"
@@ -370,7 +338,7 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
 	symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
 	if !symdata.OnList() {
 		off := dstringdata(symdata, 0, s, pos, "string")
-		ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+		objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
 		symdata.Set(obj.AttrContentAddressable, true)
 	}
@@ -450,7 +418,7 @@ func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.
 		info := symdata.NewFileInfo()
 		info.Name = file
 		info.Size = size
-		ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+		objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
 		// Note: AttrContentAddressable cannot be set here,
 		// because the content-addressable-handling code
 		// does not know about file symbols.
@@ -480,7 +448,7 @@ func slicedata(pos src.XPos, s string) *ir.Name {
 	lsym := sym.Linksym()
 	off := dstringdata(lsym, 0, s, pos, "slice")
-	ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+	objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
 	return symnode
 }
@@ -505,25 +473,6 @@ func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int
 	return off + len(t)
 }
-func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
-	off = int(types.Rnd(int64(off), int64(types.PtrSize)))
-	s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
-	off += types.PtrSize
-	return off
-}
-func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
-	s.WriteOff(base.Ctxt, int64(off), x, 0)
-	off += 4
-	return off
-}
-func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
-	s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
-	off += 4
-	return off
-}
 // slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff.
 // slicesym does not modify n.
 func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
@@ -623,3 +572,25 @@ func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
 		base.Fatalf("litsym unhandled OLITERAL %v", c)
 	}
 }
+func ggloblnod(nam ir.Node) {
+	s := nam.Sym().Linksym()
+	s.Gotype = ngotype(nam).Linksym()
+	flags := 0
+	if nam.Name().Readonly() {
+		flags = obj.RODATA
+	}
+	if nam.Type() != nil && !nam.Type().HasPointers() {
+		flags |= obj.NOPTR
+	}
+	base.Ctxt.Globl(s, nam.Type().Width, flags)
+	if nam.Name().LibfuzzerExtraCounter() {
+		s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
+	}
+	if nam.Sym().Linkname != "" {
+		// Make sure linkname'd symbol is non-package. When a symbol is
+		// both imported and linkname'd, s.Pkg may not set to "_" in
+		// types.Sym.Linksym because LSym already exists. Set it here.
+		s.Pkg = "_"
+	}
+}

@@ -6,7 +6,9 @@ package gc
 import (
 	"cmd/compile/internal/base"
+	"cmd/compile/internal/bitvec"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/typecheck"
 	"cmd/compile/internal/types"
@@ -34,13 +36,13 @@ func emitptrargsmap(fn *ir.Func) {
 	}
 	lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
 	nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize))
-	bv := bvalloc(int32(nptr) * 2)
+	bv := bitvec.New(int32(nptr) * 2)
 	nbitmap := 1
 	if fn.Type().NumResults() > 0 {
 		nbitmap = 2
 	}
-	off := duint32(lsym, 0, uint32(nbitmap))
-	off = duint32(lsym, off, uint32(bv.n))
+	off := objw.Uint32(lsym, 0, uint32(nbitmap))
+	off = objw.Uint32(lsym, off, uint32(bv.N))
 	if ir.IsMethod(fn) {
 		onebitwalktype1(fn.Type().Recvs(), 0, bv)
@@ -48,14 +50,14 @@ func emitptrargsmap(fn *ir.Func) {
 	if fn.Type().NumParams() > 0 {
 		onebitwalktype1(fn.Type().Params(), 0, bv)
 	}
-	off = dbvec(lsym, off, bv)
+	off = objw.BitVec(lsym, off, bv)
 	if fn.Type().NumResults() > 0 {
 		onebitwalktype1(fn.Type().Results(), 0, bv)
-		off = dbvec(lsym, off, bv)
+		off = objw.BitVec(lsym, off, bv)
 	}
-	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
+	objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
 }
 // cmpstackvarlt reports whether the stack variable a sorts before b.
@@ -314,7 +316,7 @@ func compileSSA(fn *ir.Func, worker int) {
 		largeStackFramesMu.Unlock()
 		return
 	}
-	pp := newProgs(fn, worker)
+	pp := objw.NewProgs(fn, worker)
 	defer pp.Free()
 	genssa(f, pp)
 	// Check frame size again.

@ -16,7 +16,9 @@ package gc
import ( import (
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/ir" "cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa" "cmd/compile/internal/ssa"
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/obj" "cmd/internal/obj"
@ -88,15 +90,15 @@ type BlockEffects struct {
// //
// uevar: upward exposed variables (used before set in block) // uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block) // varkill: killed variables (set in block)
uevar bvec uevar bitvec.BitVec
varkill bvec varkill bitvec.BitVec
// Computed during Liveness.solve using control flow information: // Computed during Liveness.solve using control flow information:
// //
// livein: variables live at block entry // livein: variables live at block entry
// liveout: variables live at block exit // liveout: variables live at block exit
livein bvec livein bitvec.BitVec
liveout bvec liveout bitvec.BitVec
} }
// A collection of global state used by liveness analysis. // A collection of global state used by liveness analysis.
@ -114,84 +116,54 @@ type Liveness struct {
allUnsafe bool allUnsafe bool
// unsafePoints bit i is set if Value ID i is an unsafe-point // unsafePoints bit i is set if Value ID i is an unsafe-point
// (preemption is not allowed). Only valid if !allUnsafe. // (preemption is not allowed). Only valid if !allUnsafe.
unsafePoints bvec unsafePoints bitvec.BitVec
// An array with a bit vector for each safe point in the // An array with a bit vector for each safe point in the
// current Block during Liveness.epilogue. Indexed in Value // current Block during Liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block // order for that block. Additionally, for the entry block
// livevars[0] is the entry bitmap. Liveness.compact moves // livevars[0] is the entry bitmap. Liveness.compact moves
// these to stackMaps. // these to stackMaps.
livevars []bvec livevars []bitvec.BitVec
// livenessMap maps from safe points (i.e., CALLs) to their // livenessMap maps from safe points (i.e., CALLs) to their
// liveness map indexes. // liveness map indexes.
livenessMap LivenessMap livenessMap LivenessMap
stackMapSet bvecSet stackMapSet bvecSet
stackMaps []bvec stackMaps []bitvec.BitVec
cache progeffectscache cache progeffectscache
} }
// LivenessMap maps from *ssa.Value to LivenessIndex. // LivenessMap maps from *ssa.Value to LivenessIndex.
type LivenessMap struct { type LivenessMap struct {
vals map[ssa.ID]LivenessIndex vals map[ssa.ID]objw.LivenessIndex
// The set of live, pointer-containing variables at the deferreturn // The set of live, pointer-containing variables at the deferreturn
// call (only set when open-coded defers are used). // call (only set when open-coded defers are used).
deferreturn LivenessIndex deferreturn objw.LivenessIndex
} }
func (m *LivenessMap) reset() { func (m *LivenessMap) reset() {
if m.vals == nil { if m.vals == nil {
m.vals = make(map[ssa.ID]LivenessIndex) m.vals = make(map[ssa.ID]objw.LivenessIndex)
} else { } else {
for k := range m.vals { for k := range m.vals {
delete(m.vals, k) delete(m.vals, k)
} }
} }
m.deferreturn = LivenessDontCare m.deferreturn = objw.LivenessDontCare
} }
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) { func (m *LivenessMap) set(v *ssa.Value, i objw.LivenessIndex) {
m.vals[v.ID] = i m.vals[v.ID] = i
} }
func (m LivenessMap) Get(v *ssa.Value) LivenessIndex { func (m LivenessMap) Get(v *ssa.Value) objw.LivenessIndex {
// If v isn't in the map, then it's a "don't care" and not an // If v isn't in the map, then it's a "don't care" and not an
// unsafe-point. // unsafe-point.
if idx, ok := m.vals[v.ID]; ok { if idx, ok := m.vals[v.ID]; ok {
return idx return idx
} }
return LivenessIndex{StackMapDontCare, false} return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
}
// LivenessIndex stores the liveness map information for a Value.
type LivenessIndex struct {
stackMapIndex int
// isUnsafePoint indicates that this is an unsafe-point.
//
// Note that it's possible for a call Value to have a stack
// map while also being an unsafe-point. This means it cannot
// be preempted at this instruction, but that a preemption or
// stack growth may happen in the called function.
isUnsafePoint bool
}
// LivenessDontCare indicates that the liveness information doesn't
// matter. Currently it is used in deferreturn liveness when we don't
// actually need it. It should never be emitted to the PCDATA stream.
var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
// StackMapDontCare indicates that the stack map index at a Value
// doesn't matter.
//
// This is a sentinel value that should never be emitted to the PCDATA
// stream. We use -1000 because that's obviously never a valid stack
// index (but -1 is).
const StackMapDontCare = -1000
func (idx LivenessIndex) StackMapValid() bool {
return idx.stackMapIndex != StackMapDontCare
} }
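
Taken together, LivenessDontCare and StackMapDontCare let a caller branch on whether a Value actually carries a stack map. A minimal consumer sketch, illustrative only — emitStackMapPCDATA is a hypothetical stand-in for the real PCDATA emission:

	idx := lv.livenessMap.Get(v)
	if idx.StackMapValid() {
		emitStackMapPCDATA(idx.stackMapIndex) // hypothetical emitter
	}
	if idx.isUnsafePoint {
		// preemption must not be requested at this instruction
	}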
type progeffectscache struct { type progeffectscache struct {
@ -380,7 +352,7 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
if cap(lc.be) >= f.NumBlocks() { if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()] lv.be = lc.be[:f.NumBlocks()]
} }
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare} lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: objw.LivenessDontCare}
lc.livenessMap.vals = nil lc.livenessMap.vals = nil
} }
if lv.be == nil { if lv.be == nil {
@ -389,14 +361,14 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
nblocks := int32(len(f.Blocks)) nblocks := int32(len(f.Blocks))
nvars := int32(len(vars)) nvars := int32(len(vars))
bulk := bvbulkalloc(nvars, nblocks*7) bulk := bitvec.NewBulk(nvars, nblocks*7)
for _, b := range f.Blocks { for _, b := range f.Blocks {
be := lv.blockEffects(b) be := lv.blockEffects(b)
be.uevar = bulk.next() be.uevar = bulk.Next()
be.varkill = bulk.next() be.varkill = bulk.Next()
be.livein = bulk.next() be.livein = bulk.Next()
be.liveout = bulk.next() be.liveout = bulk.Next()
} }
lv.livenessMap.reset() lv.livenessMap.reset()
@ -411,7 +383,7 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
// NOTE: The bitmap for a specific type t could be cached in t after // NOTE: The bitmap for a specific type t could be cached in t after
// the first run and then simply copied into bv at the correct offset // the first run and then simply copied into bv at the correct offset
// on future calls with the same type t. // on future calls with the same type t.
func onebitwalktype1(t *types.Type, off int64, bv bvec) { func onebitwalktype1(t *types.Type, off int64, bv bitvec.BitVec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 { if t.Align > 0 && off&int64(t.Align-1) != 0 {
base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off) base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
} }
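
For intuition, here is the bitmap onebitwalktype1 produces for a small struct, assuming 8-byte pointer words (a worked example, not code from the change):

	//	type T struct {
	//		p *int   // word 0: pointer -> bit 0 = 1
	//		n int64  // word 1: scalar  -> bit 1 = 0
	//		q *byte  // word 2: pointer -> bit 2 = 1
	//	}
	//
	// bv ends up as 101, so the GC scans words 0 and 2 and skips word 1.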
@ -487,7 +459,7 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
// Generates live pointer value maps for arguments and local variables. The // Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars // this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes. // argument is a slice of *Nodes.
func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) { func (lv *Liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
for i := int32(0); ; i++ { for i := int32(0); ; i++ {
i = liveout.Next(i) i = liveout.Next(i)
if i < 0 { if i < 0 {
@ -527,7 +499,7 @@ func (lv *Liveness) markUnsafePoints() {
return return
} }
lv.unsafePoints = bvalloc(int32(lv.f.NumValues())) lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
// Mark architecture-specific unsafe points. // Mark architecture-specific unsafe points.
for _, b := range lv.f.Blocks { for _, b := range lv.f.Blocks {
@ -638,11 +610,11 @@ func (lv *Liveness) markUnsafePoints() {
// nice to only flood as far as the unsafe.Pointer -> uintptr // nice to only flood as far as the unsafe.Pointer -> uintptr
// conversion, but it's hard to know which argument of an Add // conversion, but it's hard to know which argument of an Add
// or Sub to follow. // or Sub to follow.
var flooded bvec var flooded bitvec.BitVec
var flood func(b *ssa.Block, vi int) var flood func(b *ssa.Block, vi int)
flood = func(b *ssa.Block, vi int) { flood = func(b *ssa.Block, vi int) {
if flooded.n == 0 { if flooded.N == 0 {
flooded = bvalloc(int32(lv.f.NumBlocks())) flooded = bitvec.New(int32(lv.f.NumBlocks()))
} }
if flooded.Get(int32(b.ID)) { if flooded.Get(int32(b.ID)) {
return return
@ -725,8 +697,8 @@ func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and // These temporary bitvectors exist to avoid successive allocations and
// frees within the loop. // frees within the loop.
nvars := int32(len(lv.vars)) nvars := int32(len(lv.vars))
newlivein := bvalloc(nvars) newlivein := bitvec.New(nvars)
newliveout := bvalloc(nvars) newliveout := bitvec.New(nvars)
// Walk blocks in postorder ordering. This improves convergence. // Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder() po := lv.f.Postorder()
@ -783,8 +755,8 @@ func (lv *Liveness) solve() {
// variables at each safe point location. // variables at each safe point location.
func (lv *Liveness) epilogue() { func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars)) nvars := int32(len(lv.vars))
liveout := bvalloc(nvars) liveout := bitvec.New(nvars)
livedefer := bvalloc(nvars) // always-live variables livedefer := bitvec.New(nvars) // always-live variables
// If there is a defer (that could recover), then all output // If there is a defer (that could recover), then all output
// parameters are live all the time. In addition, any locals // parameters are live all the time. In addition, any locals
@ -838,7 +810,7 @@ func (lv *Liveness) epilogue() {
{ {
// Reserve an entry for function entry. // Reserve an entry for function entry.
live := bvalloc(nvars) live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live) lv.livevars = append(lv.livevars, live)
} }
@ -852,7 +824,7 @@ func (lv *Liveness) epilogue() {
continue continue
} }
live := bvalloc(nvars) live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live) lv.livevars = append(lv.livevars, live)
} }
@ -910,16 +882,16 @@ func (lv *Liveness) epilogue() {
// If we have an open-coded deferreturn call, make a liveness map for it. // If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.OpenCodedDeferDisallowed() { if lv.fn.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare lv.livenessMap.deferreturn = objw.LivenessDontCare
} else { } else {
lv.livenessMap.deferreturn = LivenessIndex{ lv.livenessMap.deferreturn = objw.LivenessIndex{
stackMapIndex: lv.stackMapSet.add(livedefer), StackMapIndex: lv.stackMapSet.add(livedefer),
isUnsafePoint: false, IsUnsafePoint: false,
} }
} }
// Done compacting. Throw out the stack map set. // Done compacting. Throw out the stack map set.
lv.stackMaps = lv.stackMapSet.extractUniqe() lv.stackMaps = lv.stackMapSet.extractUnique()
lv.stackMapSet = bvecSet{} lv.stackMapSet = bvecSet{}
// Useful sanity check: on entry to the function, // Useful sanity check: on entry to the function,
@ -958,9 +930,9 @@ func (lv *Liveness) compact(b *ssa.Block) {
for _, v := range b.Values { for _, v := range b.Values {
hasStackMap := lv.hasStackMap(v) hasStackMap := lv.hasStackMap(v)
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID)) isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
idx := LivenessIndex{StackMapDontCare, isUnsafePoint} idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
if hasStackMap { if hasStackMap {
idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos]) idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
pos++ pos++
} }
if hasStackMap || isUnsafePoint { if hasStackMap || isUnsafePoint {
@ -972,7 +944,7 @@ func (lv *Liveness) compact(b *ssa.Block) {
lv.livevars = lv.livevars[:0] lv.livevars = lv.livevars[:0]
} }
func (lv *Liveness) showlive(v *ssa.Value, live bvec) { func (lv *Liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") { if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return return
} }
@ -1012,7 +984,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
base.WarnfAt(pos, s) base.WarnfAt(pos, s)
} }
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool { func (lv *Liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
if live.IsEmpty() { if live.IsEmpty() {
return printed return printed
} }
@ -1128,7 +1100,7 @@ func (lv *Liveness) printDebug() {
fmt.Printf("\tlive=") fmt.Printf("\tlive=")
printed = false printed = false
if pcdata.StackMapValid() { if pcdata.StackMapValid() {
live := lv.stackMaps[pcdata.stackMapIndex] live := lv.stackMaps[pcdata.StackMapIndex]
for j, n := range lv.vars { for j, n := range lv.vars {
if !live.Get(int32(j)) { if !live.Get(int32(j)) {
continue continue
@ -1143,7 +1115,7 @@ func (lv *Liveness) printDebug() {
fmt.Printf("\n") fmt.Printf("\n")
} }
if pcdata.isUnsafePoint { if pcdata.IsUnsafePoint {
fmt.Printf("\tunsafe-point\n") fmt.Printf("\tunsafe-point\n")
} }
} }
@ -1196,13 +1168,13 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Temporary symbols for encoding bitmaps. // Temporary symbols for encoding bitmaps.
var argsSymTmp, liveSymTmp obj.LSym var argsSymTmp, liveSymTmp obj.LSym
args := bvalloc(int32(maxArgs / int64(types.PtrSize))) args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
locals := bvalloc(int32(maxLocals / int64(types.PtrSize))) locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
for _, live := range lv.stackMaps { for _, live := range lv.stackMaps {
args.Clear() args.Clear()
@ -1210,8 +1182,8 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
lv.pointerMap(live, lv.vars, args, locals) lv.pointerMap(live, lv.vars, args, locals)
aoff = dbvec(&argsSymTmp, aoff, args) aoff = objw.BitVec(&argsSymTmp, aoff, args)
loff = dbvec(&liveSymTmp, loff, locals) loff = objw.BitVec(&liveSymTmp, loff, locals)
} }
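
The Uint32/BitVec calls above define the on-disk shape of the two funcdata symbols. Reconstructed from the writes — the field names here are descriptive, not taken from the runtime:

	//	uint32     n     // number of bitmaps
	//	uint32     nbit  // bits per bitmap
	//	n × bitmap       // each bitmap written as ceil(nbit/8) bytes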
// Give these LSyms content-addressable names, // Give these LSyms content-addressable names,
@ -1233,7 +1205,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// pointer variables in the function and emits a runtime data // pointer variables in the function and emits a runtime data
// structure read by the garbage collector. // structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index. // Returns a map from GC safe points to their corresponding stack map index.
func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap { func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) LivenessMap {
// Construct the global liveness state. // Construct the global liveness state.
vars, idx := getvariables(curfn) vars, idx := getvariables(curfn)
lv := newliveness(curfn, f, vars, idx, stkptrsize) lv := newliveness(curfn, f, vars, idx, stkptrsize)
@ -1247,7 +1219,7 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness
for _, b := range f.Blocks { for _, b := range f.Blocks {
for _, val := range b.Values { for _, val := range b.Values {
if idx := lv.livenessMap.Get(val); idx.StackMapValid() { if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
lv.showlive(val, lv.stackMaps[idx.stackMapIndex]) lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
} }
} }
} }
@ -1276,13 +1248,13 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness
fninfo.GCArgs, fninfo.GCLocals = lv.emit() fninfo.GCArgs, fninfo.GCLocals = lv.emit()
p := pp.Prog(obj.AFUNCDATA) p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps) p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCArgs p.To.Sym = fninfo.GCArgs
p = pp.Prog(obj.AFUNCDATA) p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps) p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals p.To.Sym = fninfo.GCLocals


@ -6,7 +6,9 @@ package gc
import ( import (
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/ir" "cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/typecheck" "cmd/compile/internal/typecheck"
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/gcprog" "cmd/internal/gcprog"
@ -472,14 +474,14 @@ func dimportpath(p *types.Pkg) {
s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".") s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
ot := dnameData(s, 0, str, "", nil, false) ot := dnameData(s, 0, str, "", nil, false)
ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true) s.Set(obj.AttrContentAddressable, true)
p.Pathsym = s p.Pathsym = s
} }
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil { if pkg == nil {
return duintptr(s, ot, 0) return objw.Uintptr(s, ot, 0)
} }
if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" { if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
@ -489,17 +491,17 @@ func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
// Every package that imports this one directly defines the symbol. // Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := base.Ctxt.Lookup(`type..importpath."".`) ns := base.Ctxt.Lookup(`type..importpath."".`)
return dsymptr(s, ot, ns, 0) return objw.SymPtr(s, ot, ns, 0)
} }
dimportpath(pkg) dimportpath(pkg)
return dsymptr(s, ot, pkg.Pathsym, 0) return objw.SymPtr(s, ot, pkg.Pathsym, 0)
} }
// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
if pkg == nil { if pkg == nil {
return duint32(s, ot, 0) return objw.Uint32(s, ot, 0)
} }
if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" { if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
// If we don't know the full import path of the package being compiled // If we don't know the full import path of the package being compiled
@ -508,11 +510,11 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
// Every package that imports this one directly defines the symbol. // Every package that imports this one directly defines the symbol.
// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
ns := base.Ctxt.Lookup(`type..importpath."".`) ns := base.Ctxt.Lookup(`type..importpath."".`)
return dsymptrOff(s, ot, ns) return objw.SymPtrOff(s, ot, ns)
} }
dimportpath(pkg) dimportpath(pkg)
return dsymptrOff(s, ot, pkg.Pathsym) return objw.SymPtrOff(s, ot, pkg.Pathsym)
} }
// dnameField dumps a reflect.name for a struct field. // dnameField dumps a reflect.name for a struct field.
@ -521,7 +523,7 @@ func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
base.Fatalf("package mismatch for %v", ft.Sym) base.Fatalf("package mismatch for %v", ft.Sym)
} }
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name)) nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
return dsymptr(lsym, ot, nsym, 0) return objw.SymPtr(lsym, ot, nsym, 0)
} }
// dnameData writes the contents of a reflect.name into s at offset ot. // dnameData writes the contents of a reflect.name into s at offset ot.
@ -600,7 +602,7 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
return s return s
} }
ot := dnameData(s, 0, name, tag, pkg, exported) ot := dnameData(s, 0, name, tag, pkg, exported)
ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true) s.Set(obj.AttrContentAddressable, true)
return s return s
} }
@ -634,10 +636,10 @@ func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
base.Fatalf("methods are too far away on %v: %d", t, dataAdd) base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
} }
ot = duint16(lsym, ot, uint16(mcount)) ot = objw.Uint16(lsym, ot, uint16(mcount))
ot = duint16(lsym, ot, uint16(xcount)) ot = objw.Uint16(lsym, ot, uint16(xcount))
ot = duint32(lsym, ot, uint32(dataAdd)) ot = objw.Uint32(lsym, ot, uint32(dataAdd))
ot = duint32(lsym, ot, 0) ot = objw.Uint32(lsym, ot, 0)
return ot return ot
} }
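
The two Uint16s and two Uint32s written above fill the method-count header of an uncommon type. A sketch of the mirrored layout, assuming it matches reflect's uncommonType (the pkgPath offset is written separately by dgopkgpathOff):

	//	mcount uint16 // number of methods
	//	xcount uint16 // number of exported methods
	//	moff   uint32 // offset from this header to [mcount]method
	//	_      uint32 // unused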
@ -669,7 +671,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
} }
nsym := dname(a.name.Name, "", pkg, exported) nsym := dname(a.name.Name, "", pkg, exported)
ot = dsymptrOff(lsym, ot, nsym) ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype)) ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
@ -678,7 +680,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
} }
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int { func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
duint32(s, ot, 0) objw.Uint32(s, ot, 0)
r := obj.Addrel(s) r := obj.Addrel(s)
r.Off = int32(ot) r.Off = int32(ot)
r.Siz = 4 r.Siz = 4
@ -768,9 +770,9 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// ptrToThis typeOff // ptrToThis typeOff
// } // }
ot := 0 ot := 0
ot = duintptr(lsym, ot, uint64(t.Width)) ot = objw.Uintptr(lsym, ot, uint64(t.Width))
ot = duintptr(lsym, ot, uint64(ptrdata)) ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
ot = duint32(lsym, ot, types.TypeHash(t)) ot = objw.Uint32(lsym, ot, types.TypeHash(t))
var tflag uint8 var tflag uint8
if uncommonSize(t) != 0 { if uncommonSize(t) != 0 {
@ -802,7 +804,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
} }
} }
ot = duint8(lsym, ot, tflag) ot = objw.Uint8(lsym, ot, tflag)
// runtime (and common sense) expects alignment to be a power of two. // runtime (and common sense) expects alignment to be a power of two.
i := int(t.Align) i := int(t.Align)
@ -813,8 +815,8 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if i&(i-1) != 0 { if i&(i-1) != 0 {
base.Fatalf("invalid alignment %d for %v", t.Align, t) base.Fatalf("invalid alignment %d for %v", t.Align, t)
} }
ot = duint8(lsym, ot, t.Align) // align ot = objw.Uint8(lsym, ot, t.Align) // align
ot = duint8(lsym, ot, t.Align) // fieldAlign ot = objw.Uint8(lsym, ot, t.Align) // fieldAlign
i = kinds[t.Kind()] i = kinds[t.Kind()]
if types.IsDirectIface(t) { if types.IsDirectIface(t) {
@ -823,23 +825,23 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if useGCProg { if useGCProg {
i |= objabi.KindGCProg i |= objabi.KindGCProg
} }
ot = duint8(lsym, ot, uint8(i)) // kind ot = objw.Uint8(lsym, ot, uint8(i)) // kind
if eqfunc != nil { if eqfunc != nil {
ot = dsymptr(lsym, ot, eqfunc, 0) // equality function ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
} else { } else {
ot = duintptr(lsym, ot, 0) // type we can't do == with ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
} }
ot = dsymptr(lsym, ot, gcsym, 0) // gcdata ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
nsym := dname(p, "", nil, exported) nsym := dname(p, "", nil, exported)
ot = dsymptrOff(lsym, ot, nsym) // str ot = objw.SymPtrOff(lsym, ot, nsym) // str
// ptrToThis // ptrToThis
if sptr == nil { if sptr == nil {
ot = duint32(lsym, ot, 0) ot = objw.Uint32(lsym, ot, 0)
} else if sptrWeak { } else if sptrWeak {
ot = dsymptrWeakOff(lsym, ot, sptr) ot = objw.SymPtrWeakOff(lsym, ot, sptr)
} else { } else {
ot = dsymptrOff(lsym, ot, sptr) ot = objw.SymPtrOff(lsym, ot, sptr)
} }
return ot return ot
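
Read top to bottom, the writes in dcommontype fill the common header one field at a time. Summarizing the correspondence — a sketch; the authoritative layout is the commented struct at the top of the function:

	//	size       uintptr  <- Uintptr(t.Width)
	//	ptrdata    uintptr  <- Uintptr(ptrdata)
	//	hash       uint32   <- Uint32(types.TypeHash(t))
	//	tflag      uint8    <- Uint8(tflag)
	//	align      uint8    <- Uint8(t.Align)
	//	fieldAlign uint8    <- Uint8(t.Align)
	//	kind       uint8    <- Uint8(i)
	//	equal      func     <- SymPtr(eqfunc, 0) or Uintptr(0)
	//	gcdata     *byte    <- SymPtr(gcsym, 0)
	//	str        nameOff  <- SymPtrOff(nsym)
	//	ptrToThis  typeOff  <- 0, SymPtrWeakOff, or SymPtrOff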
@ -1029,24 +1031,24 @@ func dtypesym(t *types.Type) *obj.LSym {
t2 := types.NewSlice(t.Elem()) t2 := types.NewSlice(t.Elem())
s2 := dtypesym(t2) s2 := dtypesym(t2)
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dsymptr(lsym, ot, s2, 0) ot = objw.SymPtr(lsym, ot, s2, 0)
ot = duintptr(lsym, ot, uint64(t.NumElem())) ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
case types.TSLICE: case types.TSLICE:
// ../../../../runtime/type.go:/sliceType // ../../../../runtime/type.go:/sliceType
s1 := dtypesym(t.Elem()) s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
case types.TCHAN: case types.TCHAN:
// ../../../../runtime/type.go:/chanType // ../../../../runtime/type.go:/chanType
s1 := dtypesym(t.Elem()) s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = duintptr(lsym, ot, uint64(t.ChanDir())) ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
case types.TFUNC: case types.TFUNC:
@ -1068,8 +1070,8 @@ func dtypesym(t *types.Type) *obj.LSym {
if isddd { if isddd {
outCount |= 1 << 15 outCount |= 1 << 15
} }
ot = duint16(lsym, ot, uint16(inCount)) ot = objw.Uint16(lsym, ot, uint16(inCount))
ot = duint16(lsym, ot, uint16(outCount)) ot = objw.Uint16(lsym, ot, uint16(outCount))
if types.PtrSize == 8 { if types.PtrSize == 8 {
ot += 4 // align for *rtype ot += 4 // align for *rtype
} }
@ -1079,13 +1081,13 @@ func dtypesym(t *types.Type) *obj.LSym {
// Array of rtype pointers follows funcType. // Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() { for _, t1 := range t.Recvs().Fields().Slice() {
ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
} }
for _, t1 := range t.Params().Fields().Slice() { for _, t1 := range t.Params().Fields().Slice() {
ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
} }
for _, t1 := range t.Results().Fields().Slice() { for _, t1 := range t.Results().Fields().Slice() {
ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
} }
case types.TINTER: case types.TINTER:
@ -1104,9 +1106,9 @@ func dtypesym(t *types.Type) *obj.LSym {
} }
ot = dgopkgpath(lsym, ot, tpkg) ot = dgopkgpath(lsym, ot, tpkg)
ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
ot = duintptr(lsym, ot, uint64(n)) ot = objw.Uintptr(lsym, ot, uint64(n))
ot = duintptr(lsym, ot, uint64(n)) ot = objw.Uintptr(lsym, ot, uint64(n))
dataAdd := imethodSize() * n dataAdd := imethodSize() * n
ot = dextratype(lsym, ot, t, dataAdd) ot = dextratype(lsym, ot, t, dataAdd)
@ -1119,8 +1121,8 @@ func dtypesym(t *types.Type) *obj.LSym {
} }
nsym := dname(a.name.Name, "", pkg, exported) nsym := dname(a.name.Name, "", pkg, exported)
ot = dsymptrOff(lsym, ot, nsym) ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dsymptrOff(lsym, ot, dtypesym(a.type_)) ot = objw.SymPtrOff(lsym, ot, dtypesym(a.type_))
} }
// ../../../../runtime/type.go:/mapType // ../../../../runtime/type.go:/mapType
@ -1131,27 +1133,27 @@ func dtypesym(t *types.Type) *obj.LSym {
hasher := genhash(t.Key()) hasher := genhash(t.Key())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dsymptr(lsym, ot, s2, 0) ot = objw.SymPtr(lsym, ot, s2, 0)
ot = dsymptr(lsym, ot, s3, 0) ot = objw.SymPtr(lsym, ot, s3, 0)
ot = dsymptr(lsym, ot, hasher, 0) ot = objw.SymPtr(lsym, ot, hasher, 0)
var flags uint32 var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go // Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf. // and maptype builder in ../../../../reflect/type.go:MapOf.
if t.Key().Width > MAXKEYSIZE { if t.Key().Width > MAXKEYSIZE {
ot = duint8(lsym, ot, uint8(types.PtrSize)) ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 1 // indirect key flags |= 1 // indirect key
} else { } else {
ot = duint8(lsym, ot, uint8(t.Key().Width)) ot = objw.Uint8(lsym, ot, uint8(t.Key().Width))
} }
if t.Elem().Width > MAXELEMSIZE { if t.Elem().Width > MAXELEMSIZE {
ot = duint8(lsym, ot, uint8(types.PtrSize)) ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
flags |= 2 // indirect value flags |= 2 // indirect value
} else { } else {
ot = duint8(lsym, ot, uint8(t.Elem().Width)) ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
} }
ot = duint16(lsym, ot, uint16(bmap(t).Width)) ot = objw.Uint16(lsym, ot, uint16(bmap(t).Width))
if types.IsReflexive(t.Key()) { if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key flags |= 4 // reflexive key
} }
@ -1161,7 +1163,7 @@ func dtypesym(t *types.Type) *obj.LSym {
if hashMightPanic(t.Key()) { if hashMightPanic(t.Key()) {
flags |= 16 // hash might panic flags |= 16 // hash might panic
} }
ot = duint32(lsym, ot, flags) ot = objw.Uint32(lsym, ot, flags)
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
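
A worked example of the flags word for a concrete map type, assuming neither size limit above is exceeded:

	//	map[float64]bool:
	//	  keysize  = 8   // inline key: 8 <= MAXKEYSIZE
	//	  elemsize = 1   // inline elem: 1 <= MAXELEMSIZE
	//	  flags    = 8   // flag 4 (reflexive key) clear: NaN != NaN
	//	                 // flag 8 (key update) set: +0.0 vs -0.0
	//	                 // flags 1, 2, 16 clear: no indirection,
	//	                 // float hashing cannot panic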
case types.TPTR: case types.TPTR:
@ -1177,7 +1179,7 @@ func dtypesym(t *types.Type) *obj.LSym {
s1 := dtypesym(t.Elem()) s1 := dtypesym(t.Elem())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0) ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0) ot = dextratype(lsym, ot, t, 0)
// ../../../../runtime/type.go:/structType // ../../../../runtime/type.go:/structType
@ -1203,9 +1205,9 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dgopkgpath(lsym, ot, spkg) ot = dgopkgpath(lsym, ot, spkg)
ot = dsymptr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t)) ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
ot = duintptr(lsym, ot, uint64(len(fields))) ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
ot = duintptr(lsym, ot, uint64(len(fields))) ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
dataAdd := len(fields) * structfieldSize() dataAdd := len(fields) * structfieldSize()
ot = dextratype(lsym, ot, t, dataAdd) ot = dextratype(lsym, ot, t, dataAdd)
@ -1213,7 +1215,7 @@ func dtypesym(t *types.Type) *obj.LSym {
for _, f := range fields { for _, f := range fields {
// ../../../../runtime/type.go:/structField // ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f) ot = dnameField(lsym, ot, spkg, f)
ot = dsymptr(lsym, ot, dtypesym(f.Type), 0) ot = objw.SymPtr(lsym, ot, dtypesym(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1 offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) { if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name) base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
@ -1221,12 +1223,12 @@ func dtypesym(t *types.Type) *obj.LSym {
if f.Embedded != 0 { if f.Embedded != 0 {
offsetAnon |= 1 offsetAnon |= 1
} }
ot = duintptr(lsym, ot, offsetAnon) ot = objw.Uintptr(lsym, ot, offsetAnon)
} }
} }
ot = dextratypeData(lsym, ot, t) ot = dextratypeData(lsym, ot, t)
ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA)) objw.Global(lsym, int32(ot), int16(dupok|obj.RODATA))
// The linker will leave a table of all the typelinks for // The linker will leave a table of all the typelinks for
// types in the binary, so the runtime can find them. // types in the binary, so the runtime can find them.
@ -1396,15 +1398,15 @@ func dumptabs() {
// _ [4]byte // _ [4]byte
// fun [1]uintptr // variable sized // fun [1]uintptr // variable sized
// } // }
o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0) o := objw.SymPtr(i.lsym, 0, dtypesym(i.itype), 0)
o = dsymptr(i.lsym, o, dtypesym(i.t), 0) o = objw.SymPtr(i.lsym, o, dtypesym(i.t), 0)
o = duint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
o += 4 // skip unused field o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) { for _, fn := range genfun(i.t, i.itype) {
o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method o = objw.SymPtr(i.lsym, o, fn, 0) // method pointer for each method
} }
// Nothing writes static itabs, so they are read only. // Nothing writes static itabs, so they are read only.
ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
i.lsym.Set(obj.AttrContentAddressable, true) i.lsym.Set(obj.AttrContentAddressable, true)
} }
@ -1421,20 +1423,20 @@ func dumptabs() {
// } // }
nsym := dname(p.s.Name, "", nil, true) nsym := dname(p.s.Name, "", nil, true)
tsym := dtypesym(p.t) tsym := dtypesym(p.t)
ot = dsymptrOff(s, ot, nsym) ot = objw.SymPtrOff(s, ot, nsym)
ot = dsymptrOff(s, ot, tsym) ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types // Plugin exports symbols as interfaces. Mark their types
// as UsedInIface. // as UsedInIface.
tsym.Set(obj.AttrUsedInIface, true) tsym.Set(obj.AttrUsedInIface, true)
} }
ggloblsym(s, int32(ot), int16(obj.RODATA)) objw.Global(s, int32(ot), int16(obj.RODATA))
ot = 0 ot = 0
s = base.Ctxt.Lookup("go.plugin.exports") s = base.Ctxt.Lookup("go.plugin.exports")
for _, p := range ptabs { for _, p := range ptabs {
ot = dsymptr(s, ot, p.s.Linksym(), 0) ot = objw.SymPtr(s, ot, p.s.Linksym(), 0)
} }
ggloblsym(s, int32(ot), int16(obj.RODATA)) objw.Global(s, int32(ot), int16(obj.RODATA))
} }
} }
@ -1569,9 +1571,9 @@ func dgcptrmask(t *types.Type) *obj.LSym {
if !sym.Uniq() { if !sym.Uniq() {
sym.SetUniq(true) sym.SetUniq(true)
for i, x := range ptrmask { for i, x := range ptrmask {
duint8(lsym, i, x) objw.Uint8(lsym, i, x)
} }
ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
lsym.Set(obj.AttrContentAddressable, true) lsym.Set(obj.AttrContentAddressable, true)
} }
return lsym return lsym
@ -1588,7 +1590,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
return return
} }
vec := bvalloc(8 * int32(len(ptrmask))) vec := bitvec.New(8 * int32(len(ptrmask)))
onebitwalktype1(t, 0, vec) onebitwalktype1(t, 0, vec)
nptr := types.PtrDataSize(t) / int64(types.PtrSize) nptr := types.PtrDataSize(t) / int64(types.PtrSize)
@ -1637,13 +1639,13 @@ func (p *GCProg) init(lsym *obj.LSym) {
} }
func (p *GCProg) writeByte(x byte) { func (p *GCProg) writeByte(x byte) {
p.symoff = duint8(p.lsym, p.symoff, x) p.symoff = objw.Uint8(p.lsym, p.symoff, x)
} }
func (p *GCProg) end() { func (p *GCProg) end() {
p.w.End() p.w.End()
duint32(p.lsym, 0, uint32(p.symoff-4)) objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
if base.Debug.GCProg > 0 { if base.Debug.GCProg > 0 {
fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
} }


@ -18,6 +18,7 @@ import (
"bytes" "bytes"
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/ir" "cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa" "cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck" "cmd/compile/internal/typecheck"
"cmd/compile/internal/types" "cmd/compile/internal/types"
@ -228,22 +229,22 @@ func dvarint(x *obj.LSym, off int, v int64) int {
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v)) panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
} }
if v < 1<<7 { if v < 1<<7 {
return duint8(x, off, uint8(v)) return objw.Uint8(x, off, uint8(v))
} }
off = duint8(x, off, uint8((v&127)|128)) off = objw.Uint8(x, off, uint8((v&127)|128))
if v < 1<<14 { if v < 1<<14 {
return duint8(x, off, uint8(v>>7)) return objw.Uint8(x, off, uint8(v>>7))
} }
off = duint8(x, off, uint8(((v>>7)&127)|128)) off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
if v < 1<<21 { if v < 1<<21 {
return duint8(x, off, uint8(v>>14)) return objw.Uint8(x, off, uint8(v>>14))
} }
off = duint8(x, off, uint8(((v>>14)&127)|128)) off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
if v < 1<<28 { if v < 1<<28 {
return duint8(x, off, uint8(v>>21)) return objw.Uint8(x, off, uint8(v>>21))
} }
off = duint8(x, off, uint8(((v>>21)&127)|128)) off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
return duint8(x, off, uint8(v>>28)) return objw.Uint8(x, off, uint8(v>>28))
} }
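
dvarint emits the standard little-endian base-128 varint: seven payload bits per byte, low bits first, with the high bit as a continuation marker, capped at five bytes here. A matching decoder sketch (assumed for illustration, not part of this change), plus one worked value:

	// readvarint is the inverse of dvarint (sketch).
	func readvarint(b []byte) (v int64, n int) {
		var shift uint
		for n < len(b) {
			c := b[n]
			n++
			v |= int64(c&127) << shift
			if c&128 == 0 {
				return v, n
			}
			shift += 7
		}
		return 0, 0 // truncated input
	}

	// Example: 300 = 0b1_0010_1100 encodes as 0xAC 0x02
	// (300&127|128 = 0xAC, then 300>>7 = 2).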
// emitOpenDeferInfo emits FUNCDATA information about the defers in a function // emitOpenDeferInfo emits FUNCDATA information about the defers in a function
@ -6281,7 +6282,7 @@ func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
} }
// Generate a disconnected call to a runtime routine and a return. // Generate a disconnected call to a runtime routine and a return.
func gencallret(pp *Progs, sym *obj.LSym) *obj.Prog { func gencallret(pp *objw.Progs, sym *obj.LSym) *obj.Prog {
p := pp.Prog(obj.ACALL) p := pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
@ -6298,7 +6299,7 @@ type Branch struct {
// SSAGenState contains state needed during Prog generation. // SSAGenState contains state needed during Prog generation.
type SSAGenState struct { type SSAGenState struct {
pp *Progs pp *objw.Progs
// Branches remembers all the branch instructions we've seen // Branches remembers all the branch instructions we've seen
// and where they would like to go. // and where they would like to go.
@ -6344,12 +6345,12 @@ func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
// Pc returns the current Prog. // Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog { func (s *SSAGenState) Pc() *obj.Prog {
return s.pp.next return s.pp.Next
} }
// SetPos sets the current source position. // SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) { func (s *SSAGenState) SetPos(pos src.XPos) {
s.pp.pos = pos s.pp.Pos = pos
} }
// Br emits a single branch instruction and returns the instruction. // Br emits a single branch instruction and returns the instruction.
@ -6385,7 +6386,7 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
} }
s.SetPos(p) s.SetPos(p)
} else { } else {
s.SetPos(s.pp.pos.WithNotStmt()) s.SetPos(s.pp.Pos.WithNotStmt())
} }
} }
} }
@ -6397,7 +6398,7 @@ func (s byXoffset) Len() int { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() } func (s byXoffset) Less(i, j int) bool { return s[i].FrameOffset() < s[j].FrameOffset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) { func emitStackObjects(e *ssafn, pp *objw.Progs) {
var vars []*ir.Name var vars []*ir.Name
for _, n := range e.curfn.Dcl { for _, n := range e.curfn.Dcl {
if livenessShouldTrack(n) && n.Addrtaken() { if livenessShouldTrack(n) && n.Addrtaken() {
@ -6415,21 +6416,21 @@ func emitStackObjects(e *ssafn, pp *Progs) {
// Format must match runtime/stack.go:stackObjectRecord. // Format must match runtime/stack.go:stackObjectRecord.
x := e.curfn.LSym.Func().StackObjects x := e.curfn.LSym.Func().StackObjects
off := 0 off := 0
off = duintptr(x, off, uint64(len(vars))) off = objw.Uintptr(x, off, uint64(len(vars)))
for _, v := range vars { for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset, // Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp. // in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp. // Locals have a negative Xoffset, in which case the offset is relative to varp.
off = duintptr(x, off, uint64(v.FrameOffset())) off = objw.Uintptr(x, off, uint64(v.FrameOffset()))
if !types.TypeSym(v.Type()).Siggen() { if !types.TypeSym(v.Type()).Siggen() {
e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type()) e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
} }
off = dsymptr(x, off, dtypesym(v.Type()), 0) off = objw.SymPtr(x, off, dtypesym(v.Type()), 0)
} }
// Emit a funcdata pointing at the stack object data. // Emit a funcdata pointing at the stack object data.
p := pp.Prog(obj.AFUNCDATA) p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_StackObjects) p.From.SetConst(objabi.FUNCDATA_StackObjects)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = x p.To.Sym = x
@ -6442,7 +6443,7 @@ func emitStackObjects(e *ssafn, pp *Progs) {
} }
// genssa appends entries to pp for each instruction in f. // genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *Progs) { func genssa(f *ssa.Func, pp *objw.Progs) {
var s SSAGenState var s SSAGenState
e := f.Frontend().(*ssafn) e := f.Frontend().(*ssafn)
@ -6455,7 +6456,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// This function uses open-coded defers -- write out the funcdata // This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa. // info that we computed at the end of genssa.
p := pp.Prog(obj.AFUNCDATA) p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_OpenCodedDeferInfo) p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
p.To.Type = obj.TYPE_MEM p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = openDeferInfo p.To.Sym = openDeferInfo
@ -6471,7 +6472,7 @@ func genssa(f *ssa.Func, pp *Progs) {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues()) progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name) f.Logf("genssa %s\n", f.Name)
progToBlock[s.pp.next] = f.Blocks[0] progToBlock[s.pp.Next] = f.Blocks[0]
} }
s.ScratchFpMem = e.scratchFpMem s.ScratchFpMem = e.scratchFpMem
@ -6509,7 +6510,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// Emit basic blocks // Emit basic blocks
for i, b := range f.Blocks { for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.next s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil s.lineRunStart = nil
// Attach a "default" liveness info. Normally this will be // Attach a "default" liveness info. Normally this will be
@ -6518,12 +6519,12 @@ func genssa(f *ssa.Func, pp *Progs) {
// instruction. We won't use the actual liveness map on a // instruction. We won't use the actual liveness map on a
// control instruction. Just mark it something that is // control instruction. Just mark it something that is
// preemptible, unless this function is "all unsafe". // preemptible, unless this function is "all unsafe".
s.pp.nextLive = LivenessIndex{-1, allUnsafe(f)} s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: allUnsafe(f)}
// Emit values in block // Emit values in block
thearch.SSAMarkMoves(&s, b) thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values { for _, v := range b.Values {
x := s.pp.next x := s.pp.Next
s.DebugFriendlySetPosFrom(v) s.DebugFriendlySetPosFrom(v)
switch v.Op { switch v.Op {
@ -6561,7 +6562,7 @@ func genssa(f *ssa.Func, pp *Progs) {
default: default:
// Attach this safe point to the next // Attach this safe point to the next
// instruction. // instruction.
s.pp.nextLive = s.livenessMap.Get(v) s.pp.NextLive = s.livenessMap.Get(v)
// Special case for first line in function; move it to the start. // Special case for first line in function; move it to the start.
if firstPos != src.NoXPos { if firstPos != src.NoXPos {
@ -6573,17 +6574,17 @@ func genssa(f *ssa.Func, pp *Progs) {
} }
if base.Ctxt.Flag_locationlists { if base.Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.next valueToProgAfter[v.ID] = s.pp.Next
} }
if f.PrintOrHtmlSSA { if f.PrintOrHtmlSSA {
for ; x != s.pp.next; x = x.Link { for ; x != s.pp.Next; x = x.Link {
progToValue[x] = v progToValue[x] = v
} }
} }
} }
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused. // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b { if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
p := thearch.Ginsnop(s.pp) p := thearch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt() p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos { if b.Pos == src.NoXPos {
@ -6603,11 +6604,11 @@ func genssa(f *ssa.Func, pp *Progs) {
// line numbers for otherwise empty blocks. // line numbers for otherwise empty blocks.
next = f.Blocks[i+1] next = f.Blocks[i+1]
} }
x := s.pp.next x := s.pp.Next
s.SetPos(b.Pos) s.SetPos(b.Pos)
thearch.SSAGenBlock(&s, b, next) thearch.SSAGenBlock(&s, b, next)
if f.PrintOrHtmlSSA { if f.PrintOrHtmlSSA {
for ; x != s.pp.next; x = x.Link { for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b progToBlock[x] = b
} }
} }
@ -6623,7 +6624,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// When doing open-coded defers, generate a disconnected call to // When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic // deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return back to the runtime. // recovery to unwind the stack and return back to the runtime.
s.pp.nextLive = s.livenessMap.deferreturn s.pp.NextLive = s.livenessMap.deferreturn
gencallret(pp, ir.Syms.Deferreturn) gencallret(pp, ir.Syms.Deferreturn)
} }
@ -6655,7 +6656,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// some of the inline marks. // some of the inline marks.
// Use this instruction instead. // Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement p.Pos = p.Pos.WithIsStmt() // promote position to a statement
pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m]) pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code. // Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP m.As = obj.ANOP
m.Pos = src.NoXPos m.Pos = src.NoXPos
@ -6667,7 +6668,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList { for _, p := range inlMarkList {
if p.As != obj.ANOP { if p.As != obj.ANOP {
pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p]) pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
} }
} }
} }
@ -7048,7 +7049,7 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) {
// Call returns a new CALL instruction for the SSA value v. // Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call. // It uses PrepareCall to prepare the call.
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
pPosIsStmt := s.pp.pos.IsStmt() // The statement-ness of the call comes from ssaGenState pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
s.PrepareCall(v) s.PrepareCall(v)
p := s.Prog(obj.ACALL) p := s.Prog(obj.ACALL)
@ -7106,7 +7107,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
// Record call graph information for nowritebarrierrec // Record call graph information for nowritebarrierrec
// analysis. // analysis.
if nowritebarrierrecCheck != nil { if nowritebarrierrecCheck != nil {
nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos) nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
} }
} }


@ -6,21 +6,21 @@ package mips
import ( import (
"cmd/compile/internal/base" "cmd/compile/internal/base"
"cmd/compile/internal/gc" "cmd/compile/internal/objw"
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/obj/mips" "cmd/internal/obj/mips"
) )
// TODO(mips): implement DUFFZERO // TODO(mips): implement DUFFZERO
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 { if cnt == 0 {
return p return p
} }
if cnt < int64(4*types.PtrSize) { if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i) p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
} }
} else { } else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi) //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
@ -30,22 +30,22 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOVW R0, (Widthptr)r1 // MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1 // ADD $Widthptr, r1
// BNE r1, r2, loop // BNE r1, r2, loop
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0) p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1 p.Reg = mips.REGRT1
p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p p1 := p
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2 p.Reg = mips.REGRT2
gc.Patch(p, p1) p.To.SetTarget(p1)
} }
return p return p
} }
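
The BNE above is created before its target exists as a Prog and is patched afterwards to jump back to p1. This change swaps the gc helper for the obj.Addr method; the two calls are equivalent:

	//	gc.Patch(p, p1)     // before: helper in package gc
	//	p.To.SetTarget(p1)  // after: method on the branch's To address
	//
	// Either way the BNE loops back to the MOVW at p1 until REGRT1
	// reaches REGRT2.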
func ginsnop(pp *gc.Progs) *obj.Prog { func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR) p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0 p.From.Reg = mips.REG_R0


@ -427,7 +427,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4.From.Reg = v.Args[1].Reg() p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1 p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p2) p4.To.SetTarget(p2)
case ssa.OpMIPSLoweredMove: case ssa.OpMIPSLoweredMove:
// SUBU $4, R1 // SUBU $4, R1
// MOVW 4(R1), Rtmp // MOVW 4(R1), Rtmp
@ -480,7 +480,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p6.From.Reg = v.Args[2].Reg() p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1 p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH p6.To.Type = obj.TYPE_BRANCH
gc.Patch(p6, p2) p6.To.SetTarget(p2)
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter: case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v) s.Call(v)
case ssa.OpMIPSLoweredWB: case ssa.OpMIPSLoweredWB:
@ -577,7 +577,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) p3.To.SetTarget(p)
s.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicAdd: case ssa.OpMIPSLoweredAtomicAdd:
@ -613,7 +613,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) p3.To.SetTarget(p)
s.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
@ -657,7 +657,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) p3.To.SetTarget(p)
s.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
@ -701,7 +701,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p) p3.To.SetTarget(p)
s.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
@ -750,12 +750,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.From.Type = obj.TYPE_REG p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0() p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1) p5.To.SetTarget(p1)
s.Prog(mips.ASYNC) s.Prog(mips.ASYNC)
p6 := s.Prog(obj.ANOP) p6 := s.Prog(obj.ANOP)
gc.Patch(p2, p6) p2.To.SetTarget(p6)
case ssa.OpMIPSLoweredNilCheck: case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil. // Issue a load which will fault if arg is nil.


@ -5,25 +5,25 @@
package mips64 package mips64
import ( import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ir" "cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/obj/mips" "cmd/internal/obj/mips"
) )
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 { if cnt == 0 {
return p return p
} }
if cnt < int64(4*types.PtrSize) { if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) { for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i) p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
} }
} else if cnt <= int64(128*types.PtrSize) { } else if cnt <= int64(128*types.PtrSize) {
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP p.Reg = mips.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN p.To.Name = obj.NAME_EXTERN
p.To.Sym = ir.Syms.Duffzero p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
@ -34,22 +34,22 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOVV R0, (Widthptr)r1 // MOVV R0, (Widthptr)r1
// ADDV $Widthptr, r1 // ADDV $Widthptr, r1
// BNE r1, r2, loop // BNE r1, r2, loop
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0) p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1 p.Reg = mips.REGRT1
p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p p1 := p
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2 p.Reg = mips.REGRT2
gc.Patch(p, p1) p.To.SetTarget(p1)
} }
return p return p
} }
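
The ADUFFZERO offset computed above selects an entry point partway into the Duff's-device routine. A worked example, assuming duffzero on mips64 consists of 128 steps of 8 bytes of code, each clearing one 8-byte word:

	//	cnt = 256 bytes  ->  256/8 = 32 words to clear
	//	offset = 8 * (128 - 32) = 768
	//
	// Jumping 768 bytes in skips 96 steps, so exactly the last 32
	// stores execute.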
func ginsnop(pp *gc.Progs) *obj.Prog { func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR) p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0 p.From.Reg = mips.REG_R0


@@ -428,7 +428,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p4.From.Reg = v.Args[1].Reg()
 		p4.Reg = mips.REG_R1
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p2)
+		p4.To.SetTarget(p2)
 	case ssa.OpMIPS64DUFFCOPY:
 		p := s.Prog(obj.ADUFFCOPY)
 		p.To.Type = obj.TYPE_MEM
@@ -490,7 +490,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p6.From.Reg = v.Args[2].Reg()
 		p6.Reg = mips.REG_R1
 		p6.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p6, p2)
+		p6.To.SetTarget(p2)
 	case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
 		s.Call(v)
 	case ssa.OpMIPS64LoweredWB:
@@ -579,7 +579,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 		s.Prog(mips.ASYNC)
 	case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
 		// SYNC
@@ -616,7 +616,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 		s.Prog(mips.ASYNC)
 		p4 := s.Prog(mips.AADDVU)
 		p4.From.Type = obj.TYPE_REG
@@ -659,7 +659,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = mips.REGTMP
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 		s.Prog(mips.ASYNC)
 		p4 := s.Prog(mips.AADDVU)
 		p4.From.Type = obj.TYPE_CONST
@@ -712,9 +712,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p5.From.Type = obj.TYPE_REG
 		p5.From.Reg = v.Reg0()
 		p5.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p5, p1)
+		p5.To.SetTarget(p1)
 		p6 := s.Prog(mips.ASYNC)
-		gc.Patch(p2, p6)
+		p2.To.SetTarget(p6)
 	case ssa.OpMIPS64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := s.Prog(mips.AMOVB)
@@ -751,7 +751,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.To.Type = obj.TYPE_REG
 		p3.To.Reg = v.Reg()
 		p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
-		gc.Patch(p2, p4)
+		p2.To.SetTarget(p4)
 	case ssa.OpMIPS64LoweredGetClosurePtr:
 		// Closure pointer is R22 (mips.REGCTXT).
 		gc.CheckLoweredGetClosurePtr(v)

src/cmd/compile/internal/objw/objw.go

@@ -0,0 +1,72 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objw
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/bitvec"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+)
+
+func Uint8(s *obj.LSym, off int, v uint8) int {
+	return UintN(s, off, uint64(v), 1)
+}
+
+func Uint16(s *obj.LSym, off int, v uint16) int {
+	return UintN(s, off, uint64(v), 2)
+}
+
+func Uint32(s *obj.LSym, off int, v uint32) int {
+	return UintN(s, off, uint64(v), 4)
+}
+
+func Uintptr(s *obj.LSym, off int, v uint64) int {
+	return UintN(s, off, v, types.PtrSize)
+}
+
+func UintN(s *obj.LSym, off int, v uint64, wid int) int {
+	if off&(wid-1) != 0 {
+		base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+	}
+	s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
+	return off + wid
+}
+
+func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+	off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+	s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+	off += types.PtrSize
+	return off
+}
+
+func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
+	s.WriteOff(base.Ctxt, int64(off), x, 0)
+	off += 4
+	return off
+}
+
+func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+	s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
+	off += 4
+	return off
+}
+
+func Global(s *obj.LSym, width int32, flags int16) {
+	if flags&obj.LOCAL != 0 {
+		s.Set(obj.AttrLocal, true)
+		flags &^= obj.LOCAL
+	}
+	base.Ctxt.Globl(s, int64(width), int(flags))
+}
+
+func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
+	// Runtime reads the bitmaps as byte arrays. Oblige.
+	for j := 0; int32(j) < bv.N; j += 8 {
+		word := bv.B[j/32]
+		off = Uint8(s, off, uint8(word>>(uint(j)%32)))
+	}
+	return off
+}
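Every writer in this new file follows the same convention: take the current offset into the symbol, write a value, and return the offset just past it, so calls chain into a sequential data layout. A self-contained sketch of that offset-threading pattern (sym and writeInt are stand-ins for obj.LSym and its WriteInt, not the real API):

package main

import (
	"encoding/binary"
	"fmt"
)

// sym is a stand-in for obj.LSym: a growable little-endian byte image.
type sym struct{ data []byte }

// writeInt mimics the role of LSym.WriteInt: grow to off+wid, store wid bytes.
func (s *sym) writeInt(off, wid int, v uint64) {
	for len(s.data) < off+wid {
		s.data = append(s.data, 0)
	}
	switch wid {
	case 1:
		s.data[off] = byte(v)
	case 2:
		binary.LittleEndian.PutUint16(s.data[off:], uint16(v))
	case 4:
		binary.LittleEndian.PutUint32(s.data[off:], uint32(v))
	case 8:
		binary.LittleEndian.PutUint64(s.data[off:], v)
	}
}

// uintN mirrors UintN's contract: check alignment, write v at off,
// return the offset of the next free byte.
func uintN(s *sym, off int, v uint64, wid int) int {
	if off&(wid-1) != 0 { // power-of-two widths only, as in UintN
		panic(fmt.Sprintf("misaligned: wid=%d off=%d", wid, off))
	}
	s.writeInt(off, wid, v)
	return off + wid
}

func main() {
	s := &sym{}
	off := 0
	off = uintN(s, off, 0xAB, 1)       // like Uint8
	off = uintN(s, off, 0, 1)          // padding so the next field is aligned
	off = uintN(s, off, 0xBEEF, 2)     // like Uint16
	off = uintN(s, off, 0xDEADBEEF, 4) // like Uint32
	fmt.Println(off, s.data) // 8 [171 0 239 190 239 190 173 222]
}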

src/cmd/compile/internal/objw/prog.go

@@ -0,0 +1,218 @@
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+//	Portions Copyright © 1997-1999 Vita Nuova Limited
+//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+//	Portions Copyright © 2004,2006 Bruce Ellis
+//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+//	Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package objw
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/ssa"
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"cmd/internal/src"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// NewProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
+func NewProgs(fn *ir.Func, worker int) *Progs {
+	pp := new(Progs)
+	if base.Ctxt.CanReuseProgs() {
+		sz := len(sharedProgArray) / base.Flag.LowerC
+		pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
+	}
+	pp.CurFunc = fn
+
+	// prime the pump
+	pp.Next = pp.NewProg()
+	pp.Clear(pp.Next)
+
+	pp.Pos = fn.Pos()
+	pp.SetText(fn)
+	// PCDATA tables implicitly start with index -1.
+	pp.PrevLive = LivenessIndex{-1, false}
+	pp.NextLive = pp.PrevLive
+	return pp
+}
+
+// Progs accumulates Progs for a function and converts them into machine code.
+type Progs struct {
+	Text       *obj.Prog  // ATEXT Prog for this function
+	Next       *obj.Prog  // next Prog
+	PC         int64      // virtual PC; count of Progs
+	Pos        src.XPos   // position to use for new Progs
+	CurFunc    *ir.Func   // fn these Progs are for
+	Cache      []obj.Prog // local progcache
+	CacheIndex int        // first free element of progcache
+
+	NextLive LivenessIndex // liveness index for the next Prog
+	PrevLive LivenessIndex // last emitted liveness index
+}
+
+// LivenessIndex stores the liveness map information for a Value.
+type LivenessIndex struct {
+	StackMapIndex int
+
+	// IsUnsafePoint indicates that this is an unsafe-point.
+	//
+	// Note that it's possible for a call Value to have a stack
+	// map while also being an unsafe-point. This means it cannot
+	// be preempted at this instruction, but that a preemption or
+	// stack growth may happen in the called function.
+	IsUnsafePoint bool
+}
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare = -1000
+
+// LivenessDontCare indicates that the liveness information doesn't
+// matter. Currently it is used in deferreturn liveness when we don't
+// actually need it. It should never be emitted to the PCDATA stream.
+var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+
+func (idx LivenessIndex) StackMapValid() bool {
+	return idx.StackMapIndex != StackMapDontCare
+}
+
+func (pp *Progs) NewProg() *obj.Prog {
+	var p *obj.Prog
+	if pp.CacheIndex < len(pp.Cache) {
+		p = &pp.Cache[pp.CacheIndex]
+		pp.CacheIndex++
+	} else {
+		p = new(obj.Prog)
+	}
+	p.Ctxt = base.Ctxt
+	return p
+}
+
+// Flush converts from pp to machine code.
+func (pp *Progs) Flush() {
+	plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
+	obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+}
+
+// Free clears pp and any associated resources.
+func (pp *Progs) Free() {
+	if base.Ctxt.CanReuseProgs() {
+		// Clear progs to enable GC and avoid abuse.
+		s := pp.Cache[:pp.CacheIndex]
+		for i := range s {
+			s[i] = obj.Prog{}
+		}
+	}
+	// Clear pp to avoid abuse.
+	*pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+	if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+		// Emit stack map index change.
+		idx := pp.NextLive.StackMapIndex
+		pp.PrevLive.StackMapIndex = idx
+		p := pp.Prog(obj.APCDATA)
+		p.From.SetConst(objabi.PCDATA_StackMapIndex)
+		p.To.SetConst(int64(idx))
+	}
+	if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+		// Emit unsafe-point marker.
+		pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+		p := pp.Prog(obj.APCDATA)
+		p.From.SetConst(objabi.PCDATA_UnsafePoint)
+		if pp.NextLive.IsUnsafePoint {
+			p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe)
+		} else {
+			p.To.SetConst(objabi.PCDATA_UnsafePointSafe)
+		}
+	}
+
+	p := pp.Next
+	pp.Next = pp.NewProg()
+	pp.Clear(pp.Next)
+	p.Link = pp.Next
+
+	if !pp.Pos.IsKnown() && base.Flag.K != 0 {
+		base.Warn("prog: unknown position (line 0)")
+	}
+
+	p.As = as
+	p.Pos = pp.Pos
+	if pp.Pos.IsStmt() == src.PosIsStmt {
+		// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
+		if ssa.LosesStmtMark(as) {
+			return p
+		}
+		pp.Pos = pp.Pos.WithNotStmt()
+	}
+	return p
+}
+
+func (pp *Progs) Clear(p *obj.Prog) {
+	obj.Nopout(p)
+	p.As = obj.AEND
+	p.Pc = pp.PC
+	pp.PC++
+}
+
+func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+	q := pp.NewProg()
+	pp.Clear(q)
+	q.As = as
+	q.Pos = p.Pos
+	q.From.Type = ftype
+	q.From.Reg = freg
+	q.From.Offset = foffset
+	q.To.Type = ttype
+	q.To.Reg = treg
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+func (pp *Progs) SetText(fn *ir.Func) {
+	if pp.Text != nil {
+		base.Fatalf("Progs.settext called twice")
+	}
+	ptxt := pp.Prog(obj.ATEXT)
+	pp.Text = ptxt
+	fn.LSym.Func().Text = ptxt
+	ptxt.From.Type = obj.TYPE_MEM
+	ptxt.From.Name = obj.NAME_EXTERN
+	ptxt.From.Sym = fn.LSym
+}
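NewProgs and NewProg above amortize allocation by carving worker-private windows out of one shared 10000-element Prog array, so concurrent backend workers reuse storage without locking. A simplified, self-contained model of that cache discipline (toy types; the real code hands out obj.Prog values and sizes each window by the -c backend concurrency flag, base.Flag.LowerC):

package main

import "fmt"

// prog is a stand-in for obj.Prog.
type prog struct{ as string }

// progs models the cache behavior of Progs.NewProg.
type progs struct {
	cache []prog
	idx   int
}

// A pointer to an array, as in sharedProgArray, so slicing shares storage.
var shared = new([16]prog)

// newProgs hands each worker a disjoint slice of the shared array,
// mirroring how NewProgs divides sharedProgArray among workers.
func newProgs(worker, workers int) *progs {
	sz := len(shared) / workers
	return &progs{cache: shared[sz*worker : sz*(worker+1)]}
}

func (pp *progs) newProg() *prog {
	if pp.idx < len(pp.cache) {
		p := &pp.cache[pp.idx]
		pp.idx++
		return p // reused backing storage: no heap allocation
	}
	return new(prog) // cache exhausted: fall back to the heap
}

func main() {
	pp := newProgs(0, 4) // worker 0 of 4 gets shared[0:4]
	for i := 0; i < 5; i++ {
		p := pp.newProg()
		p.as = fmt.Sprintf("INS%d", i)
	}
	fmt.Println(shared[:4], pp.idx) // first four came from the shared array
}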

src/cmd/compile/internal/ppc64/ggen.go

@@ -6,46 +6,46 @@ package ppc64
 
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/ppc64"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 	if cnt < int64(4*types.PtrSize) {
 		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+			p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
 		}
 	} else if cnt <= int64(128*types.PtrSize) {
-		p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
 		p.Reg = ppc64.REGSP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
 	} else {
-		p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
 		p.Reg = ppc64.REGSP
-		p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
 		p.Reg = ppc64.REGRT1
-		p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
+		p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
 		p1 := p
-		p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
-		p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
+		p.To.SetTarget(p1)
 	}
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	p := pp.Prog(ppc64.AOR)
 	p.From.Type = obj.TYPE_REG
 	p.From.Reg = ppc64.REG_R0
@@ -54,7 +54,7 @@ func ginsnop(pp *gc.Progs) *obj.Prog {
 	return p
 }
 
-func ginsnopdefer(pp *gc.Progs) *obj.Prog {
+func ginsnopdefer(pp *objw.Progs) *obj.Prog {
 	// On PPC64 two nops are required in the defer case.
 	//
 	// (see gc/cgen.go, gc/plive.go -- copy of comment below)
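The p.To.Offset arithmetic in zerorange above jumps into the middle of runtime.duffzero so that exactly cnt/PtrSize store iterations execute. A quick check of that arithmetic (assuming 8-byte pointers and, as the factor 4 suggests, one 4-byte instruction per unrolled iteration on ppc64):

package main

import "fmt"

func main() {
	const ptrSize = 8 // assumption: 64-bit ppc64 target
	for _, cnt := range []int64{8, 512, 1024} {
		words := cnt / ptrSize
		// Entering the 128-iteration unrolled duffzero body
		// 4*(128-words) bytes in leaves exactly `words` stores to run.
		off := 4 * (128 - words)
		fmt.Printf("cnt=%4d bytes -> enter duffzero at offset %4d (%3d stores)\n", cnt, off, words)
	}
}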

src/cmd/compile/internal/ppc64/ssa.go

@@ -210,7 +210,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// BNE retry
 		p3 := s.Prog(ppc64.ABNE)
 		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 	case ssa.OpPPC64LoweredAtomicAdd32,
 		ssa.OpPPC64LoweredAtomicAdd64:
@@ -254,7 +254,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// BNE retry
 		p4 := s.Prog(ppc64.ABNE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 
 		// Ensure a 32 bit result
 		if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
@@ -300,7 +300,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// BNE retry
 		p2 := s.Prog(ppc64.ABNE)
 		p2.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p2, p)
+		p2.To.SetTarget(p)
 		// ISYNC
 		pisync := s.Prog(ppc64.AISYNC)
 		pisync.To.Type = obj.TYPE_NONE
@@ -348,7 +348,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// ISYNC
 		pisync := s.Prog(ppc64.AISYNC)
 		pisync.To.Type = obj.TYPE_NONE
-		gc.Patch(p2, pisync)
+		p2.To.SetTarget(pisync)
 
 	case ssa.OpPPC64LoweredAtomicStore8,
 		ssa.OpPPC64LoweredAtomicStore32,
@@ -439,7 +439,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// BNE retry
 		p4 := s.Prog(ppc64.ABNE)
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
+		p4.To.SetTarget(p)
 		// LWSYNC - Assuming shared data not write-through-required nor
 		// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
 		// If the operation is a CAS-Release, then synchronization is not necessary.
@@ -462,10 +462,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p7.From.Offset = 0
 		p7.To.Type = obj.TYPE_REG
 		p7.To.Reg = out
-		gc.Patch(p2, p7)
+		p2.To.SetTarget(p7)
 		// done (label)
 		p8 := s.Prog(obj.ANOP)
-		gc.Patch(p6, p8)
+		p6.To.SetTarget(p8)
 
 	case ssa.OpPPC64LoweredGetClosurePtr:
 		// Closure pointer is R11 (already)
@@ -539,10 +539,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Reg = r
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r0
-		gc.Patch(pbahead, p)
+		pbahead.To.SetTarget(p)
 
 		p = s.Prog(obj.ANOP)
-		gc.Patch(pbover, p)
+		pbover.To.SetTarget(p)
 
 	case ssa.OpPPC64DIVW:
 		// word-width version of above
@@ -574,10 +574,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Reg = r
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = r0
-		gc.Patch(pbahead, p)
+		pbahead.To.SetTarget(p)
 
 		p = s.Prog(obj.ANOP)
-		gc.Patch(pbover, p)
+		pbover.To.SetTarget(p)
 
 	case ssa.OpPPC64CLRLSLWI:
 		r := v.Reg()
@@ -1028,7 +1028,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 		}
 		// When ctr == 1 the loop was not generated but
 		// there are at least 64 bytes to clear, so add
@@ -1228,7 +1228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 		}
 
 		// when ctr == 1 the loop was not generated but
@@ -1407,7 +1407,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 
 		// srcReg and dstReg were incremented in the loop, so
 		// later instructions start with offset 0.
@@ -1654,7 +1654,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			p.From.Offset = ppc64.BO_BCTR
 			p.Reg = ppc64.REG_R0
 			p.To.Type = obj.TYPE_BRANCH
-			gc.Patch(p, top)
+			p.To.SetTarget(top)
 
 		// srcReg and dstReg were incremented in the loop, so
 		// later instructions start with offset 0.
@@ -1840,7 +1840,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 
 			// NOP (so the BNE has somewhere to land)
 			nop := s.Prog(obj.ANOP)
-			gc.Patch(p2, nop)
+			p2.To.SetTarget(nop)
 
 		} else {
 			// Issue a load which will fault if arg is nil.

src/cmd/compile/internal/riscv64/ggen.go

@@ -6,14 +6,14 @@ package riscv64
 
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/riscv"
 )
 
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
@@ -23,15 +23,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt < int64(4*types.PtrSize) {
 		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
-			p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+			p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
 		}
 		return p
 	}
 
 	if cnt <= int64(128*types.PtrSize) {
-		p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+		p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
 		p.Reg = riscv.REG_SP
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = ir.Syms.Duffzero
 		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
@@ -45,15 +45,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	//	MOV	ZERO, (T0)
 	//	ADD	$Widthptr, T0
 	//	BNE	T0, T1, loop
-	p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
 	p.Reg = riscv.REG_SP
-	p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
 	p.Reg = riscv.REG_T0
-	p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+	p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
 	loop := p
-	p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
+	p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
-	p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+	p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
 	p.Reg = riscv.REG_T1
-	gc.Patch(p, loop)
+	p.To.SetTarget(loop)
 	return p
 }

src/cmd/compile/internal/riscv64/gsubr.go

@@ -5,12 +5,12 @@
 package riscv64
 
 import (
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/objw"
 	"cmd/internal/obj"
 	"cmd/internal/obj/riscv"
 )
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// Hardware nop is ADD $0, ZERO
 	p := pp.Prog(riscv.AADD)
 	p.From.Type = obj.TYPE_CONST

src/cmd/compile/internal/riscv64/ssa.go

@@ -502,7 +502,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p4.From.Reg = riscv.REG_TMP
 		p4.Reg = riscv.REG_ZERO
 		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p1)
+		p4.To.SetTarget(p1)
 
 		p5 := s.Prog(riscv.AMOV)
 		p5.From.Type = obj.TYPE_CONST
@@ -511,7 +511,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p5.To.Reg = out
 
 		p6 := s.Prog(obj.ANOP)
-		gc.Patch(p2, p6)
+		p2.To.SetTarget(p6)
 
 	case ssa.OpRISCV64LoweredZero:
 		mov, sz := largestMove(v.AuxInt)
@@ -537,7 +537,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p3.Reg = v.Args[0].Reg()
 		p3.From.Type = obj.TYPE_REG
 		p3.From.Reg = v.Args[1].Reg()
-		gc.Patch(p3, p)
+		p3.To.SetTarget(p)
 
 	case ssa.OpRISCV64LoweredMove:
 		mov, sz := largestMove(v.AuxInt)
@@ -577,7 +577,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p5.Reg = v.Args[1].Reg()
 		p5.From.Type = obj.TYPE_REG
 		p5.From.Reg = v.Args[2].Reg()
-		gc.Patch(p5, p)
+		p5.To.SetTarget(p)
 
 	case ssa.OpRISCV64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.

src/cmd/compile/internal/s390x/ggen.go

@@ -6,7 +6,7 @@ package s390x
 
 import (
 	"cmd/compile/internal/base"
-	"cmd/compile/internal/gc"
+	"cmd/compile/internal/objw"
 	"cmd/internal/obj"
 	"cmd/internal/obj/s390x"
 )
@@ -18,7 +18,7 @@ import (
 const clearLoopCutoff = 1024
 
 // zerorange clears the stack in the given range.
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
@@ -31,7 +31,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	// need to create a copy of the stack pointer that we can adjust.
 	// We also need to do this if we are going to loop.
 	if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
-		p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+		p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
 		p.Reg = int16(s390x.REGSP)
 		reg = s390x.REGRT1
 		off = 0
@@ -40,12 +40,12 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	// Generate a loop of large clears.
 	if cnt > clearLoopCutoff {
 		ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
-		p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+		p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
-		p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+		p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
 		pl := p
-		p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+		p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
-		p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+		p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, pl)
+		p.To.SetTarget(pl)
 		cnt = cnt % 256
 	}
 
@@ -70,11 +70,11 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 			case 2:
 				ins = s390x.AMOVH
 			}
-			p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+			p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
 
 		// Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
 		default:
-			p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+			p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
 		}
 
 		cnt -= n
@@ -84,6 +84,6 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	return pp.Prog(s390x.ANOPH)
 }
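The zerorange strategy above splits a large clear into cnt/256 iterations of a 256-byte CLEAR (XC) loop plus a straight-line remainder. A small stand-alone sketch of that size split (clearLoopCutoff copied from the diff above; plan is a hypothetical helper, not compiler code):

package main

import "fmt"

// plan mirrors the split in the s390x zerorange: ranges larger than
// clearLoopCutoff go through a BRCTG-counted loop of 256-byte CLEARs,
// and whatever is left is cleared with straight-line instructions.
func plan(cnt int64) (loopIters, inline int64) {
	const clearLoopCutoff = 1024 // threshold from the diff above
	if cnt > clearLoopCutoff {
		loopIters = cnt / 256
		cnt %= 256
	}
	return loopIters, cnt
}

func main() {
	for _, cnt := range []int64{100, 1024, 1300, 4000} {
		l, r := plan(cnt)
		fmt.Printf("cnt=%4d -> %2d loop iterations, %4d bytes inline\n", cnt, l, r)
	}
}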

src/cmd/compile/internal/s390x/ssa.go

@@ -709,7 +709,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 
 		bne := s.Prog(s390x.ABLT)
 		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, mvc)
+		bne.To.SetTarget(mvc)
 
 		if v.AuxInt > 0 {
 			mvc := s.Prog(s390x.AMVC)
@@ -751,7 +751,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 
 		bne := s.Prog(s390x.ABLT)
 		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, clear)
+		bne.To.SetTarget(clear)
 
 		if v.AuxInt > 0 {
 			clear := s.Prog(s390x.ACLEAR)
@@ -846,7 +846,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 
 		// NOP (so the BNE has somewhere to land)
 		nop := s.Prog(obj.ANOP)
-		gc.Patch(bne, nop)
+		bne.To.SetTarget(nop)
 	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
 		// Loop until the CS{,G} succeeds.
 		// MOV{WZ,D} arg0, ret
@@ -873,7 +873,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// BNE cs
 		bne := s.Prog(s390x.ABNE)
 		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, cs)
+		bne.To.SetTarget(cs)
 	case ssa.OpS390XSYNC:
 		s.Prog(s390x.ASYNC)
 	case ssa.OpClobber:

src/cmd/compile/internal/wasm/ssa.go

@@ -9,6 +9,7 @@ import (
 	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
 	"cmd/compile/internal/logopt"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/ssa"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
@@ -30,7 +31,7 @@ func Init(arch *gc.Arch) {
 	arch.SSAGenBlock = ssaGenBlock
 }
 
-func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
@@ -39,15 +40,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
 	}
 
 	for i := int64(0); i < cnt; i += 8 {
-		p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+		p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
-		p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+		p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
-		p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+		p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	return pp.Prog(wasm.ANop)
 }

src/cmd/compile/internal/x86/ggen.go

@@ -5,41 +5,41 @@
 package x86
 
 import (
-	"cmd/compile/internal/gc"
 	"cmd/compile/internal/ir"
+	"cmd/compile/internal/objw"
 	"cmd/compile/internal/types"
 	"cmd/internal/obj"
 	"cmd/internal/obj/x86"
 )
 
-func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
 	if cnt == 0 {
 		return p
 	}
 
 	if *ax == 0 {
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
 		*ax = 1
 	}
 
 	if cnt <= int64(4*types.RegSize) {
 		for i := int64(0); i < cnt; i += int64(types.RegSize) {
-			p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+			p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
 		}
 	} else if cnt <= int64(128*types.RegSize) {
-		p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
+		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
 		p.To.Sym = ir.Syms.Duffzero
 	} else {
-		p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+		p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
-		p = pp.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+		p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
-		p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = pp.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
 	}
 
 	return p
 }
 
-func ginsnop(pp *gc.Progs) *obj.Prog {
+func ginsnop(pp *objw.Progs) *obj.Prog {
 	// See comment in ../amd64/ggen.go.
 	p := pp.Prog(x86.AXCHGL)
 	p.From.Type = obj.TYPE_REG