mirror of https://github.com/golang/go synced 2024-11-27 03:21:18 -07:00

runtime: rename GODEBUG=profileruntimelocks to runtimecontentionstacks

profileruntimelocks is new in CL 544195, but the name is deceptive. Even
with profileruntimelocks=0, runtime-internal locks are still profiled.
The actual difference is that call stacks are not collected. Instead all
contention is reported at runtime._LostContendedLock.

Rename this setting to runtimecontentionstacks to make its name more
aligned with its behavior.

In addition, for this release the default is runtimecontentionstacks=0,
meaning that users are fairly likely to encounter
runtime._LostContendedLock. Rename it to
runtime._LostContendedRuntimeLock in an attempt to make it more
intuitive that these are runtime locks, not locks in application code.

For #57071.

Change-Id: I38aac28b2c0852db643d53b1eab3f3bc42a43393
Reviewed-on: https://go-review.googlesource.com/c/go/+/547055
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Rhys Hiltner <rhys@justin.tv>
Michael Pratt 2023-12-04 14:28:30 -05:00 committed by Gopher Robot
parent 6e33a6376e
commit 98fd8f5768
5 changed files with 39 additions and 38 deletions
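
As a rough illustration of the behavior this change describes (not part of the CL itself): with the mutex profile enabled, contention on runtime-internal locks is attributed to runtime._LostContendedRuntimeLock by default, and only with GODEBUG=runtimecontentionstacks=1 does the profile carry real call stacks. A minimal sketch of a program that surfaces the profile, assuming it is run with that GODEBUG setting:

    package main

    import (
        "os"
        "runtime"
        "runtime/pprof"
        "sync"
    )

    func main() {
        // Sample every contention event on mutexes (and, subject to the
        // GODEBUG setting, runtime-internal locks as well).
        runtime.SetMutexProfileFraction(1)

        // Generate some lock traffic; channel and scheduler activity also
        // contends runtime-internal locks.
        var mu sync.Mutex
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for j := 0; j < 1000; j++ {
                    mu.Lock()
                    mu.Unlock()
                }
            }()
        }
        wg.Wait()

        // Dump the "mutex" profile; runtime-internal contention shows up
        // either with call stacks or as runtime._LostContendedRuntimeLock.
        pprof.Lookup("mutex").WriteTo(os.Stdout, 1)
    }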


@@ -152,13 +152,14 @@ It is a comma-separated list of name=val pairs setting these named variables:
risk in that scenario. Currently not supported on Windows, plan9 or js/wasm. Setting this
option for some applications can produce large traces, so use with care.
-profileruntimelocks: setting profileruntimelocks=1 includes call stacks related to
-contention on runtime-internal locks in the "mutex" profile, subject to the
-MutexProfileFraction setting. The call stacks will correspond to the unlock call that
-released the lock. But instead of the value corresponding to the amount of contention that
-call stack caused, it corresponds to the amount of time the caller of unlock had to wait
-in its original call to lock. A future release is expected to align those and remove this
-setting.
+runtimecontentionstacks: setting runtimecontentionstacks=1 enables inclusion of call stacks
+related to contention on runtime-internal locks in the "mutex" profile, subject to the
+MutexProfileFraction setting. When runtimecontentionstacks=0, contention on
+runtime-internal locks will report as "runtime._LostContendedRuntimeLock". When
+runtimecontentionstacks=1, the call stacks will correspond to the unlock call that released
+the lock. But instead of the value corresponding to the amount of contention that call
+stack caused, it corresponds to the amount of time the caller of unlock had to wait in its
+original call to lock. A future release is expected to align those and remove this setting.
invalidptr: invalidptr=1 (the default) causes the garbage collector and stack
copier to crash the program if an invalid pointer value (for example, 1)
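
Returning to the runtimecontentionstacks entry above: the same data is visible program-side through the runtime.MutexProfile API. A sketch, not from this CL: when runtimecontentionstacks is off, records for runtime-internal locks resolve to the runtime._LostContendedRuntimeLock sentinel rather than a meaningful leaf frame.

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        runtime.SetMutexProfileFraction(1)
        // ... run the workload of interest here ...

        // Print the leaf function of each mutex-profile record.
        recs := make([]runtime.BlockProfileRecord, 64)
        if n, ok := runtime.MutexProfile(recs); ok {
            for _, r := range recs[:n] {
                frames := runtime.CallersFrames(r.Stack())
                leaf, _ := frames.Next()
                fmt.Printf("%d cycles of delay at %s\n", r.Cycles, leaf.Function)
            }
        }
    }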


@@ -956,12 +956,12 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
{
before := os.Getenv("GODEBUG")
for _, s := range strings.Split(before, ",") {
-if strings.HasPrefix(s, "profileruntimelocks=") {
+if strings.HasPrefix(s, "runtimecontentionstacks=") {
t.Logf("GODEBUG includes explicit setting %q", s)
}
}
defer func() { os.Setenv("GODEBUG", before) }()
-os.Setenv("GODEBUG", fmt.Sprintf("%s,profileruntimelocks=1", before))
+os.Setenv("GODEBUG", fmt.Sprintf("%s,runtimecontentionstacks=1", before))
}
t.Logf("NumCPU %d", runtime.NumCPU())
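
The pattern in this hunk (save GODEBUG, append the override, restore on cleanup) works because the runtime takes the last value it sees for a given name. A hypothetical helper, not part of this CL, that packages the same idea and lets t.Setenv handle the restore:

    package example_test

    import (
        "os"
        "testing"
    )

    // setGODEBUG forces one GODEBUG setting for the rest of a test.
    // Appending is enough because, for duplicate names, the last setting
    // wins; t.Setenv restores the previous value when the test ends.
    func setGODEBUG(t *testing.T, setting string) {
        t.Helper()
        before := os.Getenv("GODEBUG")
        t.Setenv("GODEBUG", before+","+setting)
    }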


@@ -552,7 +552,7 @@ func saveblockevent(cycles, rate int64, skip int, which bucketType) {
// previous lock call took (like the user-space "block" profile).
//
// Thus, reporting the call stacks of runtime-internal lock contention is
-// guarded by GODEBUG for now. Set GODEBUG=profileruntimelocks=1 to enable.
+// guarded by GODEBUG for now. Set GODEBUG=runtimecontentionstacks=1 to enable.
//
// TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment
//
@@ -644,7 +644,7 @@ func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
if prev := prof.cycles; prev > 0 {
// We can only store one call stack for runtime-internal lock contention
// on this M, and we've already got one. Decide which should stay, and
-// add the other to the report for runtime._LostContendedLock.
+// add the other to the report for runtime._LostContendedRuntimeLock.
prevScore := uint64(cheaprand64()) % uint64(prev)
thisScore := uint64(cheaprand64()) % uint64(cycles)
if prevScore > thisScore {
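
The scoring above decides which of two contention events keeps its call stack when only one slot is available on the M: each side draws a random score below its cycle count and the larger score survives, so the event that saw more contention is more likely to keep its stack. A toy sketch of that heuristic, separate from the runtime code:

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    // keepPrev reports whether the previously stored event (prevCycles of
    // contention) should keep the single call-stack slot over a new event.
    func keepPrev(prevCycles, newCycles uint64) bool {
        return rand.Uint64N(prevCycles) > rand.Uint64N(newCycles)
    }

    func main() {
        kept := 0
        for i := 0; i < 100000; i++ {
            if keepPrev(900, 100) { // previous event saw 9x the contention
                kept++
            }
        }
        fmt.Printf("previous stack kept in %.0f%% of trials\n", float64(kept)/1000)
    }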
@@ -690,8 +690,8 @@ func (prof *mLockProfile) captureStack() {
}
prof.pending = 0
-if debug.profileruntimelocks.Load() == 0 {
-prof.stack[0] = abi.FuncPCABIInternal(_LostContendedLock) + sys.PCQuantum
+if debug.runtimeContentionStacks.Load() == 0 {
+prof.stack[0] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
prof.stack[1] = 0
return
}
@@ -733,7 +733,7 @@ func (prof *mLockProfile) store() {
saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
if lost > 0 {
lostStk := [...]uintptr{
-abi.FuncPCABIInternal(_LostContendedLock) + sys.PCQuantum,
+abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
}
saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
}


@@ -5279,7 +5279,7 @@ func _ExternalCode() { _ExternalCode() }
func _LostExternalCode() { _LostExternalCode() }
func _GC() { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
-func _LostContendedLock() { _LostContendedLock() }
+func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
func _VDSO() { _VDSO() }
// Called if we receive a SIGPROF signal.
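
runtime._LostContendedRuntimeLock, like the other functions in this block, is never actually called; it exists only so that its entry PC can be planted as a synthetic leaf frame that pprof resolves to a readable name. A user-level sketch of the same trick, with a hypothetical function name and reflection standing in for the runtime's internal abi package:

    package main

    import (
        "fmt"
        "reflect"
        "runtime"
    )

    // Never called; exists only so its PC can stand in for "lost" samples.
    func lostSamples() { lostSamples() }

    func main() {
        pc := reflect.ValueOf(lostSamples).Pointer()
        fmt.Println(runtime.FuncForPC(pc).Name()) // prints main.lostSamples
    }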


@@ -307,28 +307,28 @@ type dbgVar struct {
// existing int var for that value, which may
// already have an initial value.
var debug struct {
-cgocheck int32
-clobberfree int32
-disablethp int32
-dontfreezetheworld int32
-efence int32
-gccheckmark int32
-gcpacertrace int32
-gcshrinkstackoff int32
-gcstoptheworld int32
-gctrace int32
-invalidptr int32
-madvdontneed int32 // for Linux; issue 28466
-profileruntimelocks atomic.Int32
-scavtrace int32
-scheddetail int32
-schedtrace int32
-tracebackancestors int32
-asyncpreemptoff int32
-harddecommit int32
-adaptivestackstart int32
-tracefpunwindoff int32
-traceadvanceperiod int32
+cgocheck int32
+clobberfree int32
+disablethp int32
+dontfreezetheworld int32
+efence int32
+gccheckmark int32
+gcpacertrace int32
+gcshrinkstackoff int32
+gcstoptheworld int32
+gctrace int32
+invalidptr int32
+madvdontneed int32 // for Linux; issue 28466
+runtimeContentionStacks atomic.Int32
+scavtrace int32
+scheddetail int32
+schedtrace int32
+tracebackancestors int32
+asyncpreemptoff int32
+harddecommit int32
+adaptivestackstart int32
+tracefpunwindoff int32
+traceadvanceperiod int32
// debug.malloc is used as a combined debug check
// in the malloc function and should be set
@@ -355,7 +355,7 @@ var dbgvars = []*dbgVar{
{name: "gctrace", value: &debug.gctrace},
{name: "invalidptr", value: &debug.invalidptr},
{name: "madvdontneed", value: &debug.madvdontneed},
{name: "profileruntimelocks", atomic: &debug.profileruntimelocks},
{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
{name: "sbrk", value: &debug.sbrk},
{name: "scavtrace", value: &debug.scavtrace},
{name: "scheddetail", value: &debug.scheddetail},