
[dev.typeparams] Revert "[dev.typeparams] runtime,cmd/compile,cmd/link: replace jmpdefer with a loop"

This reverts CL 337652.

I'm reverting CL 337651 and this builds on top of it.

Change-Id: I03ce363be44c2a3defff2e43e7b1aad83386820d
Reviewed-on: https://go-review.googlesource.com/c/go/+/338709
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Austin Clements 2021-07-30 16:40:17 -04:00
parent 40e561d933
commit e3e9f0bb2d
31 changed files with 324 additions and 39 deletions

@@ -18,6 +18,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = ssaMarkMoves
	arch.SSAGenValue = ssaGenValue

@@ -18,6 +18,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.SoftFloat = buildcfg.GOARM == 5
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
	arch.SSAGenValue = ssaGenValue

@@ -18,6 +18,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.PadFrame = padframe
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
	arch.SSAGenValue = ssaGenValue

@@ -21,6 +21,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.SoftFloat = (buildcfg.GOMIPS == "softfloat")
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
	arch.SSAGenValue = ssaGenValue
	arch.SSAGenBlock = ssaGenBlock

@@ -21,6 +21,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat"
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
	arch.SSAGenValue = ssaGenValue

@@ -20,6 +20,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnopdefer

	arch.SSAMarkMoves = ssaMarkMoves
	arch.SSAGenValue = ssaGenValue

@@ -53,3 +53,30 @@ func ginsnop(pp *objw.Progs) *obj.Prog {
	p.To.Reg = ppc64.REG_R0
	return p
}

func ginsnopdefer(pp *objw.Progs) *obj.Prog {
	// On PPC64 two nops are required in the defer case.
	//
	// (see gc/cgen.go, gc/plive.go -- copy of comment below)
	//
	// On ppc64, when compiling Go into position
	// independent code on ppc64le we insert an
	// instruction to reload the TOC pointer from the
	// stack as well. See the long comment near
	// jmpdefer in runtime/asm_ppc64.s for why.
	// If the MOVD is not needed, insert a hardware NOP
	// so that the same number of instructions are used
	// on ppc64 in both shared and non-shared modes.

	ginsnop(pp)
	if base.Ctxt.Flag_shared {
		p := pp.Prog(ppc64.AMOVD)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = 24
		p.From.Reg = ppc64.REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REG_R2
		return p
	}
	return ginsnop(pp)
}

@@ -16,6 +16,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.MAXWIDTH = 1 << 50

	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop
	arch.ZeroRange = zeroRange

	arch.SSAMarkMoves = ssaMarkMoves

@@ -16,6 +16,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = ssaMarkMoves
	arch.SSAGenValue = ssaGenValue

@@ -29,7 +29,8 @@ type ArchInfo struct {
	// at function entry, and it is ok to clobber registers.
	ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog

	Ginsnop   func(*objw.Progs) *obj.Prog
	Ginsnop      func(*objw.Progs) *obj.Prog
	Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn

	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
	SSAMarkMoves func(*State, *ssa.Block)

@@ -7335,6 +7335,18 @@ func (s *State) PrepareCall(v *ssa.Value) {
	call, ok := v.Aux.(*ssa.AuxCall)

	if ok && call.Fn == ir.Syms.Deferreturn {
		// Deferred calls will appear to be returning to
		// the CALL deferreturn(SB) that we are about to emit.
		// However, the stack trace code will show the line
		// of the instruction byte before the return PC.
		// To avoid that being an unrelated instruction,
		// insert an actual hardware NOP that will have the right line number.
		// This is different from obj.ANOP, which is a virtual no-op
		// that doesn't make it into the instruction stream.
		Arch.Ginsnopdefer(s.pp)
	}

	if ok {
		// Record call graph information for nowritebarrierrec
		// analysis.

@@ -24,6 +24,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.ZeroRange = zeroRange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = ssaMarkMoves
	arch.SSAGenValue = ssaGenValue

@@ -125,11 +126,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
		s.PrepareCall(v)
		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
			// The runtime needs to inject jumps to
			// deferreturn calls using the address in
			// _func.deferreturn. Hence, the call to
			// deferreturn must itself be a resumption
			// point so it gets a target PC.
			// add a resume point before call to deferreturn so it can be called again via jmpdefer
			s.Prog(wasm.ARESUMEPOINT)
		}
		if v.Op == ssa.OpWasmLoweredClosureCall {

@@ -34,6 +34,7 @@ func Init(arch *ssagen.ArchInfo) {
	arch.ZeroRange = zerorange
	arch.Ginsnop = ginsnop
	arch.Ginsnopdefer = ginsnop

	arch.SSAMarkMoves = ssaMarkMoves
}

@@ -355,10 +355,11 @@ var oprange [ALAST & obj.AMask][]Optab
var xcmp [C_GOK + 1][C_GOK + 1]bool

var (
	symdiv  *obj.LSym
	symdivu *obj.LSym
	symmod  *obj.LSym
	symmodu *obj.LSym
	deferreturn *obj.LSym
	symdiv      *obj.LSym
	symdivu     *obj.LSym
	symmod      *obj.LSym
	symmodu     *obj.LSym
)

// Note about encoding: Prog.scond holds the condition encoding,

@@ -1218,6 +1219,8 @@ func buildop(ctxt *obj.Link) {
		return
	}

	deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)

	symdiv = ctxt.Lookup("runtime._div")
	symdivu = ctxt.Lookup("runtime._divu")
	symmod = ctxt.Lookup("runtime._mod")
@@ -129,6 +129,8 @@ var (
	morestackNoCtxt *obj.LSym
	gcWriteBarrier  *obj.LSym
	sigpanic        *obj.LSym
	deferreturn     *obj.LSym
	jmpdefer        *obj.LSym
)

const (

@@ -141,6 +143,10 @@ func instinit(ctxt *obj.Link) {
	morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt")
	gcWriteBarrier = ctxt.LookupABI("runtime.gcWriteBarrier", obj.ABIInternal)
	sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal)
	deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)
	// jmpdefer is defined in assembly as ABI0. The compiler will
	// generate a direct ABI0 call from Go, so look for that.
	jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABI0)
}

func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {

@@ -417,6 +423,12 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
				pcAfterCall-- // sigpanic expects to be called without advancing the pc
			}

			// jmpdefer manipulates the return address on the stack so deferreturn gets called repeatedly.
			// Model this in WebAssembly with a loop.
			if call.To.Sym == deferreturn {
				p = appendp(p, ALoop)
			}

			// SP -= 8
			p = appendp(p, AGet, regAddr(REG_SP))
			p = appendp(p, AI32Const, constAddr(8))

@@ -467,6 +479,15 @@
				break
			}

			// jmpdefer removes the frame of deferreturn from the Go stack.
			// However, its WebAssembly function still returns normally,
			// so we need to return from deferreturn without removing its
			// stack frame (no RET), because the frame is already gone.
			if call.To.Sym == jmpdefer {
				p = appendp(p, AReturn)
				break
			}

			// return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack
			if call.As == ACALLNORESUME && call.To.Sym != sigpanic { // sigpanic unwinds the stack, but it never resumes
				// trying to unwind WebAssembly stack but call has no resume point, terminate with error

@@ -479,6 +500,21 @@
				unwindExitBranches = append(unwindExitBranches, p)
			}

			// jump to before the call if jmpdefer has reset the return address to the call's PC
			if call.To.Sym == deferreturn {
				// get PC_B from -8(SP)
				p = appendp(p, AGet, regAddr(REG_SP))
				p = appendp(p, AI32Const, constAddr(8))
				p = appendp(p, AI32Sub)
				p = appendp(p, AI32Load16U, constAddr(0))
				p = appendp(p, ATee, regAddr(REG_PC_B))
				p = appendp(p, AI32Const, constAddr(call.Pc))
				p = appendp(p, AI32Eq)
				p = appendp(p, ABrIf, constAddr(0))
				p = appendp(p, AEnd) // end of Loop
			}

		case obj.ARET, ARETUNWIND:
			ret := *p
			p.As = obj.ANOP
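
To make the restored control flow concrete, here is a small Go model of what the Loop/BrIf bracketing above achieves around a deferreturn call site. This is an illustration only, not runtime code; pending, callPC, and pcB are stand-ins for the defer chain, the call's resume point, and the PC_B register:

	package main

	import "fmt"

	func main() {
		pending := 2      // deferred functions still queued (stand-in)
		const callPC = 7  // resume point of the CALL deferreturn (stand-in)
		pcB := callPC + 1 // return address; jmpdefer rewinds it to callPC

		for { // the ALoop emitted before the call
			// "CALL deferreturn": when a defer is pending, jmpdefer runs it
			// and resets the return address back to the call itself.
			if pending > 0 {
				fmt.Println("deferreturn ran one deferred function")
				pending--
				pcB = callPC
			} else {
				pcB = callPC + 1
			}
			// The AI32Load16U/AI32Eq/ABrIf sequence: branch back to the
			// top of the Loop while the return address equals the call's PC.
			if pcB == callPC {
				continue
			}
			break // AEnd: deferreturn returned normally, exit the loop
		}
	}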

@@ -43,6 +43,7 @@ import (
var (
	plan9privates *obj.LSym
	deferreturn   *obj.LSym
)

// Instruction layout.

@@ -34,6 +34,7 @@ const (
	FuncID_gogo
	FuncID_gopanic
	FuncID_handleAsyncEvent
	FuncID_jmpdefer
	FuncID_mcall
	FuncID_morestack
	FuncID_mstart

@@ -59,6 +60,7 @@ var funcIDs = map[string]FuncID{
	"gogo":             FuncID_gogo,
	"gopanic":          FuncID_gopanic,
	"handleAsyncEvent": FuncID_handleAsyncEvent,
	"jmpdefer":         FuncID_jmpdefer,
	"main":             FuncID_runtime_main,
	"mcall":            FuncID_mcall,
	"morestack":        FuncID_morestack,

@@ -129,10 +129,11 @@ func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 {
	for ri := 0; ri < relocs.Count(); ri++ {
		r := relocs.At(ri)
		if target.IsWasm() && r.Type() == objabi.R_ADDR {
			// wasm/ssa.go generates an ARESUMEPOINT just
			// before the deferreturn call. The "PC" of
			// the deferreturn call is stored in the
			// R_ADDR relocation on the ARESUMEPOINT.
			// Wasm does not have a live variable set at the deferreturn
			// call itself. Instead it has one identified by the
			// resumption point immediately preceding the deferreturn.
			// The wasm code has a R_ADDR relocation which is used to
			// set the resumption point to PC_B.
			lastWasmAddr = uint32(r.Add())
		}
		if r.Type().IsDirectCall() && (r.Sym() == deferReturnSym || ldr.IsDeferReturnTramp(r.Sym())) {

@@ -582,6 +582,26 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
	// compile barrier.
	RET

// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. pop the caller
// 2. sub 5 bytes (the length of CALL & a 32 bit displacement) from the callers
//    return (when building for shared libraries, subtract 16 bytes -- 5 bytes
//    for CALL & displacement to call __x86.get_pc_thunk.cx, 6 bytes for the
//    LEAL to load the offset into BX, and finally 5 for the call & displacement)
// 3. jmp to the argument
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
	MOVL fv+0(FP), DX // fn
	MOVL argp+4(FP), BX // caller sp
	LEAL -4(BX), SP // caller sp after CALL
#ifdef GOBUILDMODE_shared
	SUBL $16, (SP) // return to CALL again
#else
	SUBL $5, (SP) // return to CALL again
#endif
	MOVL 0(DX), BX
	JMP BX // but first run the deferred function

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)
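
The byte counts in the comment above are worth spelling out. A trivial check of the arithmetic (the constants are taken from the comment, not measured here):

	package main

	import "fmt"

	func main() {
		const callRel32 = 1 + 4 // CALL opcode byte + 32-bit displacement
		fmt.Println("non-shared rewind:", callRel32) // 5

		const getPCThunk = 1 + 4 // CALL __x86.get_pc_thunk.cx
		const lealOffset = 6     // LEAL to load the offset into BX
		fmt.Println("shared rewind:", getPCThunk+lealOffset+callRel32) // 16
	}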

@@ -662,6 +662,21 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
	// compile barrier.
	RET

// func jmpdefer(fv func(), argp uintptr)
// argp is a caller SP.
// called from deferreturn.
// 1. pop the caller
// 2. sub 5 bytes from the callers return
// 3. jmp to the argument
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
	MOVQ fv+0(FP), DX // fn
	MOVQ argp+8(FP), BX // caller sp
	LEAQ -8(BX), SP // caller sp after CALL
	MOVQ -8(SP), BP // restore BP as if deferreturn returned (harmless if framepointers not in use)
	SUBQ $5, (SP) // return to CALL again
	MOVQ 0(DX), BX
	JMP BX // but first run the deferred function

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -506,6 +506,20 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)

// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 4 bytes to get back to BL deferreturn
// 3. B to fn
TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
	MOVW 0(R13), LR
	MOVW $-4(LR), LR // BL deferreturn
	MOVW fv+0(FP), R7
	MOVW argp+4(FP), R13
	MOVW $-4(R13), R13 // SP is 4 below argp, due to saved LR
	MOVW 0(R7), R1
	B (R1)

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -982,6 +982,23 @@ again:
	CBNZ R0, again
	RET

// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 4 bytes to get back to BL deferreturn
// 3. BR to fn
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
	MOVD 0(RSP), R0
	SUB $4, R0
	MOVD R0, LR
	MOVD fv+0(FP), R26
	MOVD argp+8(FP), R0
	MOVD R0, RSP
	SUB $8, RSP
	MOVD 0(R26), R3
	B (R3)

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -384,6 +384,22 @@ CALLFN(·call1073741824, 1073741824)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
	RET

// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 8 bytes to get back to JAL deferreturn
// 3. JMP to fn
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
	MOVV 0(R29), R31
	ADDV $-8, R31
	MOVV fv+0(FP), REGCTXT
	MOVV argp+8(FP), R29
	ADDV $-8, R29
	NOR R0, R0 // prevent scheduling
	MOVV 0(REGCTXT), R4
	JMP (R4)

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -382,6 +382,22 @@ CALLFN(·call1073741824, 1073741824)
TEXT runtime·procyield(SB),NOSPLIT,$0-4
	RET

// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 8 bytes to get back to JAL deferreturn
// 3. JMP to fn
TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
	MOVW 0(R29), R31
	ADDU $-8, R31
	MOVW fv+0(FP), REGCTXT
	MOVW argp+4(FP), R29
	ADDU $-4, R29
	NOR R0, R0 // prevent scheduling
	MOVW 0(REGCTXT), R4
	JMP (R4)

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -503,6 +503,34 @@ again:
	OR R6, R6, R6 // Set PPR priority back to medium-low
	RET

// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 8 bytes to get back to either nop or toc reload before deferreturn
// 3. BR to fn
// When dynamically linking Go, it is not sufficient to rewind to the BL
// deferreturn -- we might be jumping between modules and so we need to reset
// the TOC pointer in r2. To do this, codegen inserts MOVD 24(R1), R2 *before*
// the BL deferreturn and jmpdefer rewinds to that.
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
	MOVD 0(R1), R31
	SUB $8, R31
	MOVD R31, LR
	MOVD fv+0(FP), R11
	MOVD argp+8(FP), R1
	SUB $FIXED_FRAME, R1
#ifdef GOOS_aix
	// AIX won't trigger a SIGSEGV if R11 = nil
	// So it manually triggers it
	CMP R0, R11
	BNE 2(PC)
	MOVD R0, 0(R0)
#endif
	MOVD 0(R11), R12
	MOVD R12, CTR
	BR (CTR)

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -248,6 +248,21 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
	MOV gobuf_pc(T0), T0
	JALR ZERO, T0

// func jmpdefer(fv func(), argp uintptr)
// called from deferreturn
// 1. grab stored return address from the caller's frame
// 2. sub 8 bytes to get back to JAL deferreturn
// 3. JMP to fn
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
	MOV 0(X2), RA
	ADD $-8, RA
	MOV fv+0(FP), CTXT
	MOV argp+8(FP), X2
	ADD $-8, X2
	MOV 0(CTXT), T0
	JALR ZERO, T0

// func procyield(cycles uint32)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
	RET

@@ -480,6 +480,21 @@ TEXT callfnMVC<>(SB),NOSPLIT|NOFRAME,$0-0
TEXT runtime·procyield(SB),NOSPLIT,$0-0
	RET

// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 6 bytes to get back to BL deferreturn (size of BRASL instruction)
// 3. BR to fn
TEXT runtime·jmpdefer(SB),NOSPLIT|NOFRAME,$0-16
	MOVD 0(R15), R1
	SUB $6, R1, LR
	MOVD fv+0(FP), R12
	MOVD argp+8(FP), R15
	SUB $8, R15
	MOVD 0(R12), R3
	BR (R3)

// Save state of caller into g->sched,
// but using fake PC from systemstack_switch.
// Must only be called from functions with no locals ($0)

@@ -193,6 +193,35 @@ TEXT runtime·return0(SB), NOSPLIT, $0-0
	MOVD $0, RET0
	RET

TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
	MOVD fv+0(FP), CTXT

	Get CTXT
	I64Eqz
	If
		CALLNORESUME runtime·sigpanic<ABIInternal>(SB)
	End

	// caller sp after CALL
	I64Load argp+8(FP)
	I64Const $8
	I64Sub
	I32WrapI64
	Set SP

	// decrease PC_B by 1 to CALL again
	Get SP
	I32Load16U (SP)
	I32Const $1
	I32Sub
	I32Store16 $0

	// but first run the deferred function
	Get CTXT
	I32WrapI64
	I64Load $0
	JMP

TEXT runtime·asminit(SB), NOSPLIT, $0-0
	// No per-thread init.
	RET

@@ -396,39 +396,47 @@ func freedeferfn() {
	throw("freedefer with d.fn != nil")
}

// deferreturn runs deferred functions for the caller's frame.
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
func deferreturn() {
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			return
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		return
	}
	if d.openDefer {
		done := runOpenDeferFrame(gp, d)
		if !done {
			throw("unfinished open-coded defers in deferreturn")
		}
		sp := getcallersp()
		if d.sp != sp {
			return
		}
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				throw("unfinished open-coded defers in deferreturn")
			}
			gp._defer = d.link
			freedefer(d)
			// If this frame uses open defers, then this
			// must be the only defer record for the
			// frame, so we can just return.
			return
		}
		fn := d.fn
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		fn()
		return
	}

	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	// If the defer function pointer is nil, force the seg fault to happen
	// here rather than in jmpdefer. gentraceback() throws an error if it is
	// called with a callback on an LR architecture and jmpdefer is on the
	// stack, because jmpdefer manipulates SP (see issue #8153).
	_ = **(**funcval)(unsafe.Pointer(&fn))
	// We must not split the stack between computing argp and
	// calling jmpdefer because argp is a uintptr stack pointer.
	argp := getcallersp() + sys.MinFrameSize
	jmpdefer(fn, argp)
}
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
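
The "_ = **(**funcval)(unsafe.Pointer(&fn))" line above is worth a closer look: it forces a nil function value to fault inside deferreturn itself rather than later inside jmpdefer. A minimal standalone sketch of the same double-dereference trick (funcval here is a local stand-in for the runtime's func-value header, not the runtime's type):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// funcval mirrors the layout of a func value's header: the first
	// word is the code pointer (stand-in, for illustration only).
	type funcval struct{ fn uintptr }

	func main() {
		defer func() { fmt.Println("recovered:", recover()) }()
		var fn func() // nil func value: the word at &fn is a nil *funcval
		// The first * yields the nil *funcval; the second * dereferences
		// it, faulting here (recoverably) rather than somewhere harder
		// to trace.
		_ = **(**funcval)(unsafe.Pointer(&fn))
		fmt.Println("unreachable when fn is nil")
	}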

@@ -176,6 +176,8 @@ func cgocallback(fn, frame, ctxt uintptr)
func gogo(buf *gobuf)

//go:noescape
func jmpdefer(fv func(), argp uintptr)

func asminit()
func setg(gg *g)
func breakpoint()
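
Since jmpdefer never returns to its caller, the declaration above has no Go body; the semantics live in the per-architecture assembly restored in this commit. As a rough Go-level model of the deferreturn/jmpdefer ping-pong described in runtime/panic.go (illustrative only; deferreturnOnce is a made-up name, not a runtime function):

	package main

	import "fmt"

	// deferreturnOnce models one execution of CALL deferreturn: it pops
	// and runs one pending deferred function and reports whether the call
	// site should execute deferreturn again (the effect jmpdefer achieves
	// by rewinding the return address).
	func deferreturnOnce(pending *[]func()) bool {
		if len(*pending) == 0 {
			return false
		}
		fn := (*pending)[0]
		*pending = (*pending)[1:]
		fn() // the tail-jump into the deferred function
		return true
	}

	func main() {
		pending := []func(){
			func() { fmt.Println("second registered, runs first") },
			func() { fmt.Println("first registered, runs last") },
		}
		for deferreturnOnce(&pending) {
			// Each iteration stands in for a rewound CALL deferreturn.
		}
	}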

@@ -331,6 +331,7 @@ const (
	funcID_gogo
	funcID_gopanic
	funcID_handleAsyncEvent
	funcID_jmpdefer
	funcID_mcall
	funcID_morestack
	funcID_mstart