diff --git a/src/runtime/proc1.go b/src/runtime/proc1.go
index 35d9e86e8e..4ffe11b653 100644
--- a/src/runtime/proc1.go
+++ b/src/runtime/proc1.go
@@ -1823,10 +1823,6 @@ func reentersyscall(pc, sp uintptr) {
 	// but can have inconsistent g->sched, do not let GC observe it.
 	_g_.m.locks++
 
-	if trace.enabled {
-		systemstack(traceGoSysCall)
-	}
-
 	// Entersyscall must not call any function that might split/grow the stack.
 	// (See details in comment above.)
 	// Catch calls that might, by replacing the stack guard with something that
@@ -1846,6 +1842,14 @@ func reentersyscall(pc, sp uintptr) {
 		})
 	}
 
+	if trace.enabled {
+		systemstack(traceGoSysCall)
+		// systemstack itself clobbers g.sched.{pc,sp} and we might
+		// need them later when the G is genuinely blocked in a
+		// syscall.
+		save(pc, sp)
+	}
+
 	if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
 		systemstack(entersyscall_sysmon)
 		save(pc, sp)
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 29600b5c4d..6631bc29d1 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -828,7 +828,7 @@ func traceGoUnpark(gp *g, skip int) {
 }
 
 func traceGoSysCall() {
-	traceEvent(traceEvGoSysCall, 4)
+	traceEvent(traceEvGoSysCall, 1)
 }
 
 func traceGoSysExit(seq uint64, ts int64) {