
runtime: avoid multiple records with identical stacks from MutexProfile

When using frame pointer unwinding, we defer frame skipping and inline
expansion for call stacks until profile reporting time. We can end up
with records which have different stacks if no frames are skipped, but
identical stacks once skipping is taken into account. Returning multiple
records with the same stack (but different values) has broken programs
which rely on the records already being fully aggregated by call stack
when returned from runtime.MutexProfile. This CL addresses the problem
by handling skipping at recording time. We do full inline expansion to
correctly skip the desired number of frames when recording the call
stack, and then handle the rest of inline expansion when reporting the
profile.
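
For reference, the "rest of inline expansion when reporting the profile" amounts to
walking the recorded return PCs with runtime.CallersFrames and writing each
resulting frame's PC back out as a return PC. A minimal sketch under that
assumption (the package and helper name expandPCs are illustrative only; the
actual helpers added by this change are expandFrames in the runtime and
expandInlinedFrames in runtime/pprof):

	package profsketch

	import "runtime"

	// expandPCs expands physical return PCs into logical frames, including
	// frames for calls that were inlined, and stores each frame's PC back
	// into dst as a return PC (call PC + 1).
	func expandPCs(dst, pcs []uintptr) int {
		frames := runtime.CallersFrames(pcs)
		n := 0
		for n < len(dst) {
			f, more := frames.Next()
			// f.PC is a "call PC"; profile consumers expect "return PCs".
			dst[n] = f.PC + 1
			n++
			if !more {
				break
			}
		}
		return n
	}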

The regression test in this CL is adapted from the reproducer in
https://github.com/grafana/pyroscope-go/issues/103, authored by Tolya
Korniltsev.

Fixes #67548

Co-Authored-By: Tolya Korniltsev <korniltsev.anatoly@gmail.com>
Change-Id: I6a42ce612377f235b2c8c0cec9ba8e9331224b84
Reviewed-on: https://go-review.googlesource.com/c/go/+/595966
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Carlos Amedee <carlos@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Auto-Submit: Carlos Amedee <carlos@golang.org>
Reviewed-by: Felix Geisendörfer <felix.geisendoerfer@datadoghq.com>
Authored by Nick Ripley on 2024-07-02 12:53:36 -04:00; committed by Gopher Robot
parent e89e880eac
commit 183a40db6d
3 changed files with 192 additions and 14 deletions


@@ -9,6 +9,7 @@ package runtime
 import (
 	"internal/abi"
+	"internal/goarch"
 	"internal/profilerecord"
 	"internal/runtime/atomic"
 	"runtime/internal/sys"
@@ -542,16 +543,14 @@ func saveblockevent(cycles, rate int64, skip int, which bucketType) {
 	gp := getg()
 	mp := acquirem() // we must not be preempted while accessing profstack
-	nstk := 1
+	var nstk int
 	if tracefpunwindoff() || gp.m.hasCgoOnStack() {
-		mp.profStack[0] = logicalStackSentinel
 		if gp.m.curg == nil || gp.m.curg == gp {
-			nstk = callers(skip, mp.profStack[1:])
+			nstk = callers(skip, mp.profStack)
 		} else {
-			nstk = gcallers(gp.m.curg, skip, mp.profStack[1:])
+			nstk = gcallers(gp.m.curg, skip, mp.profStack)
 		}
 	} else {
-		mp.profStack[0] = uintptr(skip)
 		if gp.m.curg == nil || gp.m.curg == gp {
 			if skip > 0 {
 				// We skip one fewer frame than the provided value for frame
@@ -559,12 +558,12 @@ func saveblockevent(cycles, rate int64, skip int, which bucketType) {
 				// frame, whereas the saved frame pointer will give us the
 				// caller's return address first (so, not including
 				// saveblockevent)
-				mp.profStack[0] -= 1
+				skip -= 1
 			}
-			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), mp.profStack[1:])
+			nstk = fpTracebackPartialExpand(skip, unsafe.Pointer(getfp()), mp.profStack)
 		} else {
-			mp.profStack[1] = gp.m.curg.sched.pc
-			nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[2:])
+			mp.profStack[0] = gp.m.curg.sched.pc
+			nstk = 1 + fpTracebackPartialExpand(skip, unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[1:])
 		}
 	}
@@ -572,6 +571,52 @@ func saveblockevent(cycles, rate int64, skip int, which bucketType) {
 	releasem(mp)
 }

+// fpTracebackPartialExpand records a call stack obtained starting from fp.
+// This function will skip the given number of frames, properly accounting for
+// inlining, and save remaining frames as "physical" return addresses. The
+// consumer should later use CallersFrames or similar to expand inline frames.
+func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int {
+	var n int
+	lastFuncID := abi.FuncIDNormal
+	skipOrAdd := func(retPC uintptr) bool {
+		if skip > 0 {
+			skip--
+		} else if n < len(pcBuf) {
+			pcBuf[n] = retPC
+			n++
+		}
+		return n < len(pcBuf)
+	}
+	for n < len(pcBuf) && fp != nil {
+		// return addr sits one word above the frame pointer
+		pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
+		if skip > 0 {
+			callPC := pc - 1
+			fi := findfunc(callPC)
+			u, uf := newInlineUnwinder(fi, callPC)
+			for ; uf.valid(); uf = u.next(uf) {
+				sf := u.srcFunc(uf)
+				if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
+					// ignore wrappers
+				} else if more := skipOrAdd(uf.pc + 1); !more {
+					return n
+				}
+				lastFuncID = sf.funcID
+			}
+		} else {
+			// We've skipped the desired number of frames, so no need
+			// to perform further inline expansion now.
+			pcBuf[n] = pc
+			n++
+		}
+		// follow the frame pointer to the next one
+		fp = unsafe.Pointer(*(*uintptr)(fp))
+	}
+	return n
+}
+
 // lockTimer assists with profiling contention on runtime-internal locks.
 //
 // There are several steps between the time that an M experiences contention and
@@ -1075,10 +1120,34 @@ type BlockProfileRecord struct {
 // the [testing] package's -test.blockprofile flag instead
 // of calling BlockProfile directly.
 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
-	return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
+	n, ok = blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
 		copyBlockProfileRecord(&p[0], r)
 		p = p[1:]
 	})
+	if !ok {
+		return
+	}
+	expandFrames(p[:n])
+	return
+}
+
+func expandFrames(p []BlockProfileRecord) {
+	expandedStack := makeProfStack()
+	for i := range p {
+		cf := CallersFrames(p[i].Stack())
+		j := 0
+		for j < len(expandedStack) {
+			f, more := cf.Next()
+			// f.PC is a "call PC", but later consumers will expect
+			// "return PCs"
+			expandedStack[j] = f.PC + 1
+			j++
+			if !more {
+				break
+			}
+		}
+		k := copy(p[i].Stack0[:], expandedStack[:j])
+		clear(p[i].Stack0[k:])
+	}
 }
 // blockProfileInternal returns the number of records n in the profile. If there
@@ -1111,6 +1180,9 @@ func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord
 	return
 }

+// copyBlockProfileRecord copies the sample values and call stack from src to dst.
+// The call stack is copied as-is. The caller is responsible for handling inline
+// expansion, needed when the call stack was collected with frame pointer unwinding.
 func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord) {
 	dst.Count = src.Count
 	dst.Cycles = src.Cycles
@@ -1123,7 +1195,11 @@ func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProf
 	if asanenabled {
 		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
 	}
-	i := fpunwindExpand(dst.Stack0[:], src.Stack)
+	// We just copy the stack here without inline expansion
+	// (needed if frame pointer unwinding is used)
+	// since this function is called under the profile lock,
+	// and doing something that might allocate can violate lock ordering.
+	i := copy(dst.Stack0[:], src.Stack)
 	clear(dst.Stack0[i:])
 }
@@ -1142,10 +1218,15 @@ func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok
 // Most clients should use the [runtime/pprof] package
 // instead of calling MutexProfile directly.
 func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
-	return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
+	n, ok = mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
 		copyBlockProfileRecord(&p[0], r)
 		p = p[1:]
 	})
+	if !ok {
+		return
+	}
+	expandFrames(p[:n])
+	return
 }

 // mutexProfileInternal returns the number of records n in the profile. If there


@@ -404,6 +404,25 @@ type countProfile interface {
 	Label(i int) *labelMap
 }

+// expandInlinedFrames copies the call stack from pcs into dst, expanding any
+// PCs corresponding to inlined calls into the corresponding PCs for the inlined
+// functions. Returns the number of frames copied to dst.
+func expandInlinedFrames(dst, pcs []uintptr) int {
+	cf := runtime.CallersFrames(pcs)
+	var n int
+	for n < len(dst) {
+		f, more := cf.Next()
+		// f.PC is a "call PC", but later consumers will expect
+		// "return PCs"
+		dst[n] = f.PC + 1
+		n++
+		if !more {
+			break
+		}
+	}
+	return n
+}
+
 // printCountCycleProfile outputs block profile records (for block or mutex profiles)
 // as the pprof-proto format output. Translations from cycle count to time duration
 // are done because The proto expects count and time (nanoseconds) instead of count
@@ -426,7 +445,7 @@ func printCountCycleProfile(w io.Writer, countName, cycleName string, records []
 		values[1] = int64(float64(r.Cycles) / cpuGHz)
 		// For count profiles, all stack addresses are
 		// return PCs, which is what appendLocsForStack expects.
-		n := pprof_fpunwindExpand(expandedStack[:], r.Stack)
+		n := expandInlinedFrames(expandedStack, r.Stack)
 		locs = b.appendLocsForStack(locs[:0], expandedStack[:n])
 		b.pbSample(values, locs, nil)
 	}
@@ -935,7 +954,7 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu
 	for i := range p {
 		r := &p[i]
 		fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
-		n := pprof_fpunwindExpand(expandedStack, r.Stack)
+		n := expandInlinedFrames(expandedStack, r.Stack)
 		stack := expandedStack[:n]
 		for _, pc := range stack {
 			fmt.Fprintf(w, " %#x", pc)


@@ -2578,3 +2578,81 @@ func produceProfileEvents(t *testing.T, depth int) {
 	runtime.GC()
 	goroutineDeep(t, depth-4) // -4 for produceProfileEvents, **, chanrecv1, chanrev, gopark
 }
+
+func TestMutexBlockFullAggregation(t *testing.T) {
+	// This regression test is adapted from
+	// https://github.com/grafana/pyroscope-go/issues/103,
+	// authored by Tolya Korniltsev
+
+	var m sync.Mutex
+
+	prev := runtime.SetMutexProfileFraction(-1)
+	defer runtime.SetMutexProfileFraction(prev)
+
+	const fraction = 1
+	const iters = 100
+	const workers = 2
+
+	runtime.SetMutexProfileFraction(fraction)
+	runtime.SetBlockProfileRate(1)
+	defer runtime.SetBlockProfileRate(0)
+
+	wg := sync.WaitGroup{}
+	wg.Add(workers)
+	for j := 0; j < workers; j++ {
+		go func() {
+			for i := 0; i < iters; i++ {
+				m.Lock()
+				// Wait at least 1 millisecond to pass the
+				// starvation threshold for the mutex
+				time.Sleep(time.Millisecond)
+				m.Unlock()
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+
+	assertNoDuplicates := func(name string, collect func([]runtime.BlockProfileRecord) (int, bool)) {
+		var p []runtime.BlockProfileRecord
+		n, ok := collect(nil)
+		for {
+			p = make([]runtime.BlockProfileRecord, n+50)
+			n, ok = collect(p)
+			if ok {
+				p = p[:n]
+				break
+			}
+		}
+		seen := make(map[string]struct{})
+		for _, r := range p {
+			cf := runtime.CallersFrames(r.Stack())
+			var stack strings.Builder
+			for {
+				f, more := cf.Next()
+				stack.WriteString(f.Func.Name())
+				if !more {
+					break
+				}
+				stack.WriteByte('\n')
+			}
+			s := stack.String()
+			if !strings.Contains(s, "TestMutexBlockFullAggregation") {
+				continue
+			}
+			if _, ok := seen[s]; ok {
+				t.Errorf("saw duplicate entry in %s profile with stack:\n%s", name, s)
+			}
+			seen[s] = struct{}{}
+		}
+		if len(seen) == 0 {
+			t.Errorf("did not see any samples in %s profile for this test", name)
+		}
+	}
+
+	t.Run("mutex", func(t *testing.T) {
+		assertNoDuplicates("mutex", runtime.MutexProfile)
+	})
+	t.Run("block", func(t *testing.T) {
+		assertNoDuplicates("block", runtime.BlockProfile)
+	})
+}
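
To exercise just this regression test, a standard invocation along the lines of
"go test -run=TestMutexBlockFullAggregation runtime/pprof" should work, assuming
the test lives in the runtime/pprof test package alongside produceProfileEvents.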