// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// max depth of stack to record in bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
//
// No heap pointers.
//
//go:notinheap
type bucket struct {
	next    *bucket
	allnext *bucket
	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we
	// naively counted them, we would get a skew toward mallocs.
	//
	// Hence, we delay information to get consistent snapshots as
	// of mark termination. Allocations count toward the next mark
	// termination's snapshot, while sweep frees count toward the
	// previous mark termination's snapshot:
	//
	//              MT          MT          MT          MT
	//             .·|         .·|         .·|         .·|
	//          .·˙  |      .·˙  |      .·˙  |      .·˙  |
	//       .·˙     |   .·˙     |   .·˙     |   .·˙     |
	//    .·˙        |.·˙        |.·˙        |.·˙        |
	//
	//       alloc → ▲ ← free
	//               ┠┅┅┅┅┅┅┅┅┅┅┅P
	//       C+2            → C+1 → C
	//
	//                   alloc → ▲ ← free
	//                           ┠┅┅┅┅┅┅┅┅┅┅┅P
	//                       C+2            → C+1 → C
	//
	// Since we can't publish a consistent snapshot until all of
	// the sweep frees are accounted for, we wait until the next
	// mark termination ("MT" above) to publish the previous mark
	// termination's snapshot ("P" above). To do this, allocation
	// and free events are accounted to *future* heap profile
	// cycles ("C+n" above) and we only publish a cycle once all
	// of the events from that cycle are known to be done. Specifically:
	//
	// Mallocs are accounted to cycle C+2.
	// Explicit frees are accounted to cycle C+2.
	// GC frees (done during sweeping) are accounted to cycle C+1.
	//
	// After mark termination, we increment the global heap
	// profile cycle counter and accumulate the stats from cycle C
	// into the active profile.

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it is complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores
	// cycles C, C+1, and C+2. Unlike active, these counts are
	// only for a single cycle; they are not cumulative across
	// cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}
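
// To make the scheme above concrete (a worked example based only on the
// doc comment and the code below, not additional machinery): with
// mProf.cycle == C, mProf_Malloc records an allocation in future[(C+2)%3]
// and mProf_Free records a sweep free in future[(C+1)%3].
// mProf_FlushLocked later folds future[C%3] into active, so a free is
// never published ahead of its matching allocation.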

// A memRecordCycle holds the allocation and free statistics
// for a single heap profile cycle.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  int64
	cycles int64
}

var (
	mbuckets  *bucket // memory profile buckets
	bbuckets  *bucket // blocking profile buckets
	xbuckets  *bucket // mutex profile buckets
	buckhash  *[179999]*bucket
	bucketmem uintptr

	mProf struct {
		// All fields in mProf are protected by proflock.

		// cycle is the global heap profile cycle. This wraps
		// at mProfCycleWrap.
		cycle uint32
		// flushed indicates that future[cycle] in all buckets
		// has been flushed to the active profile.
		flushed bool
	}
)

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
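
// (mProfCycleWrap is a multiple of len(memRecord{}.future), so the ring
// index cycle%3 advances through the same sequence across the wrap; the
// 2<<24 factor just keeps the counter well inside uint32 range. See the
// comment in mProf_NextCycle.)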

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	bucketmem += size
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// Return the bucket for stk[0:nstk], allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	if buckhash == nil {
		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
		if buckhash == nil {
			throw("runtime: cannot allocate memory")
		}
	}

	// Hash stack.
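	// (A variant of Bob Jenkins' one-at-a-time hash; the allocation size
	// is mixed in below so that equal stacks recording different sizes
	// land in different buckets.)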
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	for b := buckhash[i]; b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size
	b.next = buckhash[i]
	buckhash[i] = b
	if typ == memProfile {
		b.allnext = mbuckets
		mbuckets = b
	} else if typ == mutexProfile {
		b.allnext = xbuckets
		xbuckets = b
	} else {
		b.allnext = bbuckets
		bbuckets = b
	}
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	lock(&proflock)
	// We explicitly wrap mProf.cycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	mProf.cycle = (mProf.cycle + 1) % mProfCycleWrap
	mProf.flushed = false
	unlock(&proflock)
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	lock(&proflock)
	if !mProf.flushed {
		mProf_FlushLocked()
		mProf.flushed = true
	}
	unlock(&proflock)
}

func mProf_FlushLocked() {
	c := mProf.cycle
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[c%uint32(len(mp.future))]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	lock(&proflock)
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	c := mProf.cycle
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		mpc := &mp.future[(c+1)%uint32(len(mp.future))]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
	unlock(&proflock)
}
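
// As the doc comments above describe, the GC drives these three functions
// in a fixed order each cycle: mProf_NextCycle at mark termination (during
// STW), mProf_Flush once the world is restarted, and mProf_PostSweep when
// sweeping completes. See the memRecord comment for why this ordering
// yields consistent profile snapshots.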

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])
	lock(&proflock)
	b := stkbucket(memProfile, size, stk[:nstk], true)
	c := mProf.cycle
	mp := b.mp()
	mpc := &mp.future[(c+2)%uint32(len(mp.future))]
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&proflock)

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
	// This reduces potential contention and chances of deadlocks.
	// Since the object must be alive during the call to mProf_Malloc,
	// it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	lock(&proflock)
	c := mProf.cycle
	mp := b.mp()
	mpc := &mp.future[(c+1)%uint32(len(mp.future))]
	mpc.frees++
	mpc.free_bytes += size
	unlock(&proflock)
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
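
// For example (a usage sketch using only the exported API; the time import
// is assumed in the caller), a program that wants roughly one sampled event
// per millisecond spent blocked could call, once and early in main:
//
//	runtime.SetBlockProfileRate(int(time.Millisecond)) // 1e6 ns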

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}
	if blocksampled(cycles) {
		saveblockevent(cycles, skip+1, blockProfile)
	}
}

func blocksampled(cycles int64) bool {
	rate := int64(atomic.Load64(&blockprofilerate))
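	// Sampling is proportional to the time spent blocked: an event that
	// lasted at least one full rate interval (rate <= cycles) is always
	// recorded, while a shorter event is recorded with probability
	// roughly cycles/rate.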
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}

func saveblockevent(cycles int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	lock(&proflock)
	b := stkbucket(which, 0, stk[:nstk], true)
	b.bp().count++
	b.bp().cycles += cycles
	unlock(&proflock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For rate > 1, the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
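
// For example (a usage sketch): report on average one in every 100
// contention events, and restore the previous setting when done:
//
//	prev := runtime.SetMutexProfileFraction(100)
//	defer runtime.SetMutexProfileFraction(prev)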

//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// TODO(pjw): measure impact of always calling fastrand vs using something
	// like malloc.go:nextSample()
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024
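
// For example (a usage sketch): a binary that wants every allocation
// recorded in its heap profile would set, at the very top of main:
//
//	runtime.MemProfileRate = 1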

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	lock(&proflock)
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	mProf_FlushLocked()
	clear := true
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := mbuckets; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&proflock)
	return
}
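
// A typical call pattern (a usage sketch, mirroring what runtime/pprof
// does internally): retry with a larger slice until the profile fits,
// since records can appear between the sizing call and the copying call:
//
//	var p []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for !ok {
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//	}
//	p = p[:n]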

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&proflock)
	for b := mbuckets; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&proflock)
}

// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = bp.count
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := xbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := xbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
	}

	stopTheWorld("profile")

	n = 1
	for _, gp1 := range allgs {
		if isOK(gp1) {
			n++
		}
	}

	if n <= len(p) {
		ok = true
		r := p

		// Save current goroutine.
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// Save other goroutines.
		for _, gp1 := range allgs {
			if isOK(gp1) {
				if len(r) == 0 {
					// Should be impossible, but better to return a
					// truncated profile than to crash the entire process.
					break
				}
				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
				r = r[1:]
			}
		}
	}

	startTheWorld()

	return n, ok
}
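
// A typical call pattern (a usage sketch; the headroom of 10 is an
// arbitrary illustrative choice): size the slice first, leaving room
// because goroutines can be created between the two calls:
//
//	n, _ := runtime.GoroutineProfile(nil)
//	p := make([]runtime.StackRecord, n+10)
//	if n, ok := runtime.GoroutineProfile(p); ok {
//		p = p[:n]
//	}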

func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
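
// A typical call pattern (a usage sketch): since Stack truncates when the
// buffer is full, grow the buffer until the trace fits:
//
//	buf := make([]byte, 1024)
//	for {
//		n := runtime.Stack(buf, false)
//		if n < len(buf) {
//			buf = buf[:n]
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}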

// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc()
		sp := getcallersp()
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc()
	sp := getcallersp()
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}