// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime -> tracer API for memory events.

package runtime

import (
	"internal/abi"
	"internal/runtime/sys"
)

// Batch type values for the alloc/free experiment.
const (
	traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
)

// traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
//
// The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory(gen uintptr) {
	assertWorldStopped()

	// Write a batch containing information that'll be necessary to
	// interpret the events.
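	// The writer is created without a buffer, so the ensure call below is
	// expected to start a fresh batch; the header byte tagging the batch as
	// an info batch is only written when a batch is started.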
	var flushed bool
	w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree)
	w, flushed = w.ensure(1 + 4*traceBytesPerNumber)
	if flushed {
		// Annotate the batch as containing additional info.
		w.byte(byte(traceAllocFreeInfoBatch))
	}

	// Emit info.
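	// These four values mirror the traceAllocFreeInfoBatch layout above; the
	// trace consumer can use them to turn the compact span, object, and stack
	// IDs emitted below back into addresses and sizes.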
	w.varint(uint64(trace.minPageHeapAddr))
	w.varint(uint64(pageSize))
	w.varint(uint64(minHeapAlign))
	w.varint(uint64(fixedStack))

	// Finish writing the batch.
	w.flush().end()

	// Start tracing.
	trace := traceAcquire()
	if !trace.ok() {
		throw("traceSnapshotMemory: tracing is not enabled")
	}

	// Write out all the heap spans and heap objects.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanDead {
			continue
		}
		// It's some kind of span, so trace that it exists.
		trace.SpanExists(s)

		// Write out allocated objects if it's a heap span.
		if s.state.get() != mSpanInUse {
			continue
		}

		// Find all allocated objects.
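		// A slot is treated as in use if it lies below freeindex or if its
		// alloc bit is set.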
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
			if abits.index < uintptr(s.freeindex) || abits.isMarked() {
				x := s.base() + i*s.elemsize
				trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
			}
			abits.advance()
		}
	}

	// Write out all the goroutine stacks.
	forEachGRace(func(gp *g) {
		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
	})
	traceRelease(trace)
}

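// traceSpanTypeAndClass encodes a span's state and class in one argument:
// in-use spans report their span class shifted left by one bit (low bit 0),
// while all other spans report 1.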
func traceSpanTypeAndClass(s *mspan) traceArg {
	if s.state.get() == mSpanInUse {
		return traceArg(s.spanclass) << 1
	}
	return traceArg(1)
}

// SpanExists records an event indicating that the span exists.
func (tl traceLocker) SpanExists(s *mspan) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}

// SpanAlloc records an event indicating that the span has just been allocated.
func (tl traceLocker) SpanAlloc(s *mspan) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}

// SpanFree records an event indicating that the span is about to be freed.
func (tl traceLocker) SpanFree(s *mspan) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvSpanFree, traceSpanID(s))
}

// traceSpanID creates a trace ID for the span s.
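// Span base addresses are page-aligned, so expressing the ID as the offset of
// the base from the minimum heap address in page-size units yields a compact,
// unique identifier.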
func traceSpanID(s *mspan) traceArg {
	return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
}

// HeapObjectExists records that an object already exists at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from the
// span containing it.
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
}

// HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from the
// span containing it.
func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
}

// HeapObjectFree records that an object at addr is about to be freed.
func (tl traceLocker) HeapObjectFree(addr uintptr) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvHeapObjectFree, traceHeapObjectID(addr))
}

// traceHeapObjectID creates a trace ID for a heap object at address addr.
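// The ID is the object's offset from the minimum heap address expressed in
// minHeapAlign units, which keeps the encoded varint small.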
func traceHeapObjectID(addr uintptr) traceArg {
	return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
}

// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
	order := traceCompressStackSize(size)
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStack, traceGoroutineStackID(base), order)
}

// GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.
func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
	order := traceCompressStackSize(size)
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order)
}

// GoroutineStackFree records that a goroutine stack at address base is about to be freed.
func (tl traceLocker) GoroutineStackFree(base uintptr) {
	tl.eventWriter(traceGoRunning, traceProcRunning).event(traceEvGoroutineStackFree, traceGoroutineStackID(base))
}

// traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
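// The ID is the stack's offset from the minimum heap address expressed in
// fixedStack (the minimum stack allocation size) units, keeping the encoded
// value compact.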
func traceGoroutineStackID(base uintptr) traceArg {
	return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
}

// traceCompressStackSize assumes size is a power of 2 and returns log2(size).
func traceCompressStackSize(size uintptr) traceArg {
	if size&(size-1) != 0 {
		throw("goroutine stack size is not a power of 2")
	}
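	// Note: for size == 1<<k, sys.Len64 returns k+1; the trace consumer is
	// assumed to account for this one-bit offset when reconstructing the size.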
	return traceArg(sys.Len64(uint64(size)))
}