
net/http/pprof: support the "seconds" param for block, mutex profiles

When the "seconds" param is given, the block and mutex profile endpoints
report the difference between two measurements collected the given
number of seconds apart. Historically, the block and mutex profiles have
reported cumulative counts since process start, and they turn out to be
more useful when interpreted together with the duration over which the
counts accumulated.

Note: the cpu profile and trace endpoints already accept the "seconds"
parameter. With this CL, the block and mutex profile endpoints accept
it as well. Providing the "seconds" parameter to any other profile type
is an error.
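
For illustration, a minimal client sketch (not part of this CL;
localhost:6060 is a hypothetical server that imports net/http/pprof):

	package main

	import (
		"io"
		"net/http"
		"os"
	)

	func main() {
		// Ask the server to measure the mutex profile twice, 30 seconds
		// apart, and to respond with the difference between the snapshots.
		resp, err := http.Get("http://localhost:6060/debug/pprof/mutex?seconds=30")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		io.Copy(os.Stdout, resp.Body) // pprof-encoded delta profile
	}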

This change moves runtime/pprof/internal/profile to internal/profile and
adds part of the merge logic from github.com/google/pprof/profile/merge.go
to internal/profile, so that both net/http/pprof and runtime/pprof can
access it.

Fixes #23401

Change-Id: Ie2486f1a63eb8ff210d7d3bc2de683e9335fd5cd
Reviewed-on: https://go-review.googlesource.com/c/go/+/147598
Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com>
Reviewed-by: Emmanuel Odeke <emm.odeke@gmail.com>
Hana Kim authored on 2019-12-11 18:47:05 -05:00; committed by Hyang-Ah Hana Kim
parent ef5c59d47b
commit 2ff1e3ebf5
16 changed files with 841 additions and 66 deletions

@@ -292,7 +292,7 @@ var pkgDeps = map[string][]string{
"net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"},
"net/url": {"L4"},
"plugin": {"L0", "OS", "CGO"},
"runtime/pprof/internal/profile": {"L4", "OS", "compress/gzip", "regexp"},
"internal/profile": {"L4", "OS", "compress/gzip", "regexp"},
"testing/internal/testdeps": {"L4", "internal/testlog", "runtime/pprof", "regexp"},
"text/scanner": {"L4", "OS"},
"text/template/parse": {"L4"},
@@ -455,7 +455,7 @@ var pkgDeps = map[string][]string{
"golang.org/x/net/http/httpguts",
},
"net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal", "golang.org/x/net/http/httpguts"},
"net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"},
"net/http/pprof": {"L4", "OS", "context", "html/template", "net/http", "runtime/pprof", "runtime/trace", "internal/profile"},
"net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http", "go/token"},
"net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
}

@@ -0,0 +1,461 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package profile
import (
"fmt"
"sort"
"strconv"
"strings"
)
// Merge merges all the profiles in profs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one.
func Merge(srcs []*Profile) (*Profile, error) {
if len(srcs) == 0 {
return nil, fmt.Errorf("no profiles to merge")
}
p, err := combineHeaders(srcs)
if err != nil {
return nil, err
}
pm := &profileMerger{
p: p,
samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
locations: make(map[locationKey]*Location, len(srcs[0].Location)),
functions: make(map[functionKey]*Function, len(srcs[0].Function)),
mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
}
for _, src := range srcs {
// Clear the profile-specific hash tables
pm.locationsByID = make(map[uint64]*Location, len(src.Location))
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
// The Mapping list has the property that the first mapping
// represents the main binary. Take the first Mapping we see,
// otherwise the operations below will add mappings in an
// arbitrary order.
pm.mapMapping(src.Mapping[0])
}
for _, s := range src.Sample {
if !isZeroSample(s) {
pm.mapSample(s)
}
}
}
for _, s := range p.Sample {
if isZeroSample(s) {
// If there are any zero samples, re-merge the profile to GC
// them.
return Merge([]*Profile{p})
}
}
return p, nil
}
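// deltaOf is an illustrative sketch (a hypothetical helper, not part of
// the original CL): it computes p1 - p0 by negating the earlier snapshot
// in place and merging, mirroring how net/http/pprof uses Merge to
// serve delta profiles.
func deltaOf(p0, p1 *Profile) (*Profile, error) {
	p0.Scale(-1) // negate every value in the earlier snapshot
	return Merge([]*Profile{p0, p1})
}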
// Normalize normalizes the source profile by multiplying each value in
// the profile by the ratio of the sum of the base profile's values for
// that sample type to the sum of the source profile's values for that
// sample type. For example, if the base profile's values for a sample
// type sum to 100 and the source profile's values sum to 400, each
// source value of that type is scaled by 100/400 = 0.25.
func (p *Profile) Normalize(pb *Profile) error {
if err := p.compatible(pb); err != nil {
return err
}
baseVals := make([]int64, len(p.SampleType))
for _, s := range pb.Sample {
for i, v := range s.Value {
baseVals[i] += v
}
}
srcVals := make([]int64, len(p.SampleType))
for _, s := range p.Sample {
for i, v := range s.Value {
srcVals[i] += v
}
}
normScale := make([]float64, len(baseVals))
for i := range baseVals {
if srcVals[i] == 0 {
normScale[i] = 0.0
} else {
normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
}
}
p.ScaleN(normScale)
return nil
}
func isZeroSample(s *Sample) bool {
for _, v := range s.Value {
if v != 0 {
return false
}
}
return true
}
type profileMerger struct {
p *Profile
// Memoization tables within a profile.
locationsByID map[uint64]*Location
functionsByID map[uint64]*Function
mappingsByID map[uint64]mapInfo
// Memoization tables for profile entities.
samples map[sampleKey]*Sample
locations map[locationKey]*Location
functions map[functionKey]*Function
mappings map[mappingKey]*Mapping
}
type mapInfo struct {
m *Mapping
offset int64
}
func (pm *profileMerger) mapSample(src *Sample) *Sample {
s := &Sample{
Location: make([]*Location, len(src.Location)),
Value: make([]int64, len(src.Value)),
Label: make(map[string][]string, len(src.Label)),
NumLabel: make(map[string][]int64, len(src.NumLabel)),
NumUnit: make(map[string][]string, len(src.NumLabel)),
}
for i, l := range src.Location {
s.Location[i] = pm.mapLocation(l)
}
for k, v := range src.Label {
vv := make([]string, len(v))
copy(vv, v)
s.Label[k] = vv
}
for k, v := range src.NumLabel {
u := src.NumUnit[k]
vv := make([]int64, len(v))
uu := make([]string, len(u))
copy(vv, v)
copy(uu, u)
s.NumLabel[k] = vv
s.NumUnit[k] = uu
}
// Check memoization table. Must be done on the remapped location to
// account for the remapped mapping. Add current values to the
// existing sample.
k := s.key()
if ss, ok := pm.samples[k]; ok {
for i, v := range src.Value {
ss.Value[i] += v
}
return ss
}
copy(s.Value, src.Value)
pm.samples[k] = s
pm.p.Sample = append(pm.p.Sample, s)
return s
}
// key generates sampleKey to be used as a key for maps.
func (sample *Sample) key() sampleKey {
ids := make([]string, len(sample.Location))
for i, l := range sample.Location {
ids[i] = strconv.FormatUint(l.ID, 16)
}
labels := make([]string, 0, len(sample.Label))
for k, v := range sample.Label {
labels = append(labels, fmt.Sprintf("%q%q", k, v))
}
sort.Strings(labels)
numlabels := make([]string, 0, len(sample.NumLabel))
for k, v := range sample.NumLabel {
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
}
sort.Strings(numlabels)
return sampleKey{
strings.Join(ids, "|"),
strings.Join(labels, ""),
strings.Join(numlabels, ""),
}
}
type sampleKey struct {
locations string
labels string
numlabels string
}
func (pm *profileMerger) mapLocation(src *Location) *Location {
if src == nil {
return nil
}
if l, ok := pm.locationsByID[src.ID]; ok {
pm.locationsByID[src.ID] = l
return l
}
mi := pm.mapMapping(src.Mapping)
l := &Location{
ID: uint64(len(pm.p.Location) + 1),
Mapping: mi.m,
Address: uint64(int64(src.Address) + mi.offset),
Line: make([]Line, len(src.Line)),
IsFolded: src.IsFolded,
}
for i, ln := range src.Line {
l.Line[i] = pm.mapLine(ln)
}
// Check memoization table. Must be done on the remapped location to
// account for the remapped mapping ID.
k := l.key()
if ll, ok := pm.locations[k]; ok {
pm.locationsByID[src.ID] = ll
return ll
}
pm.locationsByID[src.ID] = l
pm.locations[k] = l
pm.p.Location = append(pm.p.Location, l)
return l
}
// key generates locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
key := locationKey{
addr: l.Address,
isFolded: l.IsFolded,
}
if l.Mapping != nil {
// Normalizes address to handle address space randomization.
key.addr -= l.Mapping.Start
key.mappingID = l.Mapping.ID
}
lines := make([]string, len(l.Line)*2)
for i, line := range l.Line {
if line.Function != nil {
lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
}
lines[i*2+1] = strconv.FormatInt(line.Line, 16)
}
key.lines = strings.Join(lines, "|")
return key
}
type locationKey struct {
addr, mappingID uint64
lines string
isFolded bool
}
func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
if src == nil {
return mapInfo{}
}
if mi, ok := pm.mappingsByID[src.ID]; ok {
return mi
}
// Check memoization tables.
mk := src.key()
if m, ok := pm.mappings[mk]; ok {
mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
pm.mappingsByID[src.ID] = mi
return mi
}
m := &Mapping{
ID: uint64(len(pm.p.Mapping) + 1),
Start: src.Start,
Limit: src.Limit,
Offset: src.Offset,
File: src.File,
BuildID: src.BuildID,
HasFunctions: src.HasFunctions,
HasFilenames: src.HasFilenames,
HasLineNumbers: src.HasLineNumbers,
HasInlineFrames: src.HasInlineFrames,
}
pm.p.Mapping = append(pm.p.Mapping, m)
// Update memoization tables.
pm.mappings[mk] = m
mi := mapInfo{m, 0}
pm.mappingsByID[src.ID] = mi
return mi
}
// key generates encoded strings of Mapping to be used as a key for
// maps.
func (m *Mapping) key() mappingKey {
// Normalize addresses to handle address space randomization.
// Round up to next 4K boundary to avoid minor discrepancies.
const mapsizeRounding = 0x1000
size := m.Limit - m.Start
size = size + mapsizeRounding - 1
size = size - (size % mapsizeRounding)
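// For example, a 0x1234-byte mapping rounds up to 0x2000:
// size = 0x1234 + 0xfff = 0x2233, and 0x2233 - (0x2233 % 0x1000) = 0x2000.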
key := mappingKey{
size: size,
offset: m.Offset,
}
switch {
case m.BuildID != "":
key.buildIDOrFile = m.BuildID
case m.File != "":
key.buildIDOrFile = m.File
default:
// A mapping containing neither build ID nor file name is a fake mapping. A
// key with empty buildIDOrFile is used for fake mappings so that they are
// treated as the same mapping during merging.
}
return key
}
type mappingKey struct {
size, offset uint64
buildIDOrFile string
}
func (pm *profileMerger) mapLine(src Line) Line {
ln := Line{
Function: pm.mapFunction(src.Function),
Line: src.Line,
}
return ln
}
func (pm *profileMerger) mapFunction(src *Function) *Function {
if src == nil {
return nil
}
if f, ok := pm.functionsByID[src.ID]; ok {
return f
}
k := src.key()
if f, ok := pm.functions[k]; ok {
pm.functionsByID[src.ID] = f
return f
}
f := &Function{
ID: uint64(len(pm.p.Function) + 1),
Name: src.Name,
SystemName: src.SystemName,
Filename: src.Filename,
StartLine: src.StartLine,
}
pm.functions[k] = f
pm.functionsByID[src.ID] = f
pm.p.Function = append(pm.p.Function, f)
return f
}
// key generates a struct to be used as a key for maps.
func (f *Function) key() functionKey {
return functionKey{
f.StartLine,
f.Name,
f.SystemName,
f.Filename,
}
}
type functionKey struct {
startLine int64
name, systemName, fileName string
}
// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
for _, s := range srcs[1:] {
if err := srcs[0].compatible(s); err != nil {
return nil, err
}
}
var timeNanos, durationNanos, period int64
var comments []string
seenComments := map[string]bool{}
var defaultSampleType string
for _, s := range srcs {
if timeNanos == 0 || s.TimeNanos < timeNanos {
timeNanos = s.TimeNanos
}
durationNanos += s.DurationNanos
if period == 0 || period < s.Period {
period = s.Period
}
for _, c := range s.Comments {
if seen := seenComments[c]; !seen {
comments = append(comments, c)
seenComments[c] = true
}
}
if defaultSampleType == "" {
defaultSampleType = s.DefaultSampleType
}
}
p := &Profile{
SampleType: make([]*ValueType, len(srcs[0].SampleType)),
DropFrames: srcs[0].DropFrames,
KeepFrames: srcs[0].KeepFrames,
TimeNanos: timeNanos,
DurationNanos: durationNanos,
PeriodType: srcs[0].PeriodType,
Period: period,
Comments: comments,
DefaultSampleType: defaultSampleType,
}
copy(p.SampleType, srcs[0].SampleType)
return p, nil
}
// compatible determines if two profiles can be compared/merged.
// It returns nil if the profiles are compatible; otherwise it returns
// an error with details on the incompatibility.
func (p *Profile) compatible(pb *Profile) error {
if !equalValueType(p.PeriodType, pb.PeriodType) {
return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
}
if len(p.SampleType) != len(pb.SampleType) {
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
}
for i := range p.SampleType {
if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
}
}
return nil
}
// equalValueType returns true if the two value types are semantically
// equal. It ignores the internal fields used during encode/decode.
func equalValueType(st1, st2 *ValueType) bool {
return st1.Type == st2.Type && st1.Unit == st2.Unit
}

@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package profile provides a representation of profile.proto and
// methods to encode/decode profiles in this format.
//
// This package is only for testing runtime/pprof.
// It is not used by production Go programs.
// Package profile provides a representation of
// github.com/google/pprof/proto/profile.proto and
// methods to encode/decode/merge profiles in this format.
package profile
import (
@@ -60,6 +58,7 @@ type Sample struct {
Value []int64
Label map[string][]string
NumLabel map[string][]int64
NumUnit map[string][]string
locationIDX []uint64
labelX []Label
@@ -96,6 +95,7 @@ type Location struct {
Mapping *Mapping
Address uint64
Line []Line
IsFolded bool
mappingIDX uint64
}
@@ -575,3 +575,40 @@ func (p *Profile) Demangle(d Demangler) error {
func (p *Profile) Empty() bool {
return len(p.Sample) == 0
}
// Scale multiplies all sample values in a profile by a constant.
func (p *Profile) Scale(ratio float64) {
if ratio == 1 {
return
}
ratios := make([]float64, len(p.SampleType))
for i := range p.SampleType {
ratios[i] = ratio
}
p.ScaleN(ratios)
}
// ScaleN multiplies each sample value in the profile by a different,
// per-sample-type ratio.
func (p *Profile) ScaleN(ratios []float64) error {
if len(p.SampleType) != len(ratios) {
return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
}
allOnes := true
for _, r := range ratios {
if r != 1 {
allOnes = false
break
}
}
if allOnes {
return nil
}
for _, s := range p.Sample {
for i, v := range s.Value {
if ratios[i] != 1 {
s.Value[i] = int64(float64(v) * ratios[i])
}
}
}
return nil
}
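// halve is an illustrative sketch (hypothetical, not part of the
// original CL) showing the relationship between Scale and ScaleN.
func halve(p *Profile) {
	// Scale applies one ratio to every sample type; for a profile with
	// two sample types it is equivalent to p.ScaleN([]float64{0.5, 0.5}).
	p.Scale(0.5)
}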

@@ -57,8 +57,10 @@ package pprof
import (
"bufio"
"bytes"
"context"
"fmt"
"html/template"
"internal/profile"
"io"
"log"
"net/http"
@@ -234,6 +236,10 @@ func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
serveError(w, http.StatusNotFound, "Unknown profile")
return
}
if sec := r.FormValue("seconds"); sec != "" {
name.serveDeltaProfile(w, r, p, sec)
return
}
gc, _ := strconv.Atoi(r.FormValue("gc"))
if name == "heap" && gc > 0 {
runtime.GC()
@@ -248,6 +254,90 @@ func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
p.WriteTo(w, debug)
}
func (name handler) serveDeltaProfile(w http.ResponseWriter, r *http.Request, p *pprof.Profile, secStr string) {
sec, err := strconv.ParseInt(secStr, 10, 64)
if err != nil || sec <= 0 {
serveError(w, http.StatusBadRequest, `invalid value for "seconds" - must be a positive integer`)
return
}
if !profileSupportsDelta[name] {
serveError(w, http.StatusBadRequest, `"seconds" parameter is not supported for this profile type`)
return
}
// 'name' should be a key in profileSupportsDelta.
if durationExceedsWriteTimeout(r, float64(sec)) {
serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
return
}
debug, _ := strconv.Atoi(r.FormValue("debug"))
if debug != 0 {
serveError(w, http.StatusBadRequest, "seconds and debug params are incompatible")
return
}
p0, err := collectProfile(p)
if err != nil {
serveError(w, http.StatusInternalServerError, "failed to collect profile")
return
}
t := time.NewTimer(time.Duration(sec) * time.Second)
defer t.Stop()
select {
case <-r.Context().Done():
err := r.Context().Err()
if err == context.DeadlineExceeded {
serveError(w, http.StatusRequestTimeout, err.Error())
} else { // TODO: what's a good status code for cancelled requests? 400?
serveError(w, http.StatusInternalServerError, err.Error())
}
return
case <-t.C:
}
p1, err := collectProfile(p)
if err != nil {
serveError(w, http.StatusInternalServerError, "failed to collect profile")
return
}
ts := p1.TimeNanos
dur := p1.TimeNanos - p0.TimeNanos
p0.Scale(-1)
p1, err = profile.Merge([]*profile.Profile{p0, p1})
if err != nil {
serveError(w, http.StatusInternalServerError, "failed to compute delta")
return
}
p1.TimeNanos = ts // set since we don't know what profile.Merge set for TimeNanos.
p1.DurationNanos = dur
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s-delta"`, name))
p1.Write(w)
}
func collectProfile(p *pprof.Profile) (*profile.Profile, error) {
var buf bytes.Buffer
if err := p.WriteTo(&buf, 0); err != nil {
return nil, err
}
ts := time.Now().UnixNano()
p0, err := profile.Parse(&buf)
if err != nil {
return nil, err
}
p0.TimeNanos = ts
return p0, nil
}
var profileSupportsDelta = map[handler]bool{
"block": true,
"mutex": true,
}
var profileDescriptions = map[string]string{
"allocs": "A sampling of all past memory allocations",
"block": "Stack traces that led to blocking on synchronization primitives",

@@ -6,11 +6,18 @@ package pprof
import (
"bytes"
"fmt"
"internal/profile"
"io/ioutil"
"net/http"
"net/http/httptest"
"runtime"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
// TestDescriptions checks that the profile names under runtime/pprof package
@@ -40,6 +47,8 @@ func TestHandlers(t *testing.T) {
{"/debug/pprof/profile?seconds=1", Profile, http.StatusOK, "application/octet-stream", `attachment; filename="profile"`, nil},
{"/debug/pprof/symbol", Symbol, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/trace", Trace, http.StatusOK, "application/octet-stream", `attachment; filename="trace"`, nil},
{"/debug/pprof/mutex", Index, http.StatusOK, "application/octet-stream", `attachment; filename="mutex"`, nil},
{"/debug/pprof/block?seconds=1", Index, http.StatusOK, "application/octet-stream", `attachment; filename="block-delta"`, nil},
{"/debug/pprof/", Index, http.StatusOK, "text/html; charset=utf-8", "", []byte("Types of profiles available:")},
}
for _, tc := range testCases {
@@ -78,5 +87,183 @@ func TestHandlers(t *testing.T) {
}
})
}
}
var Sink uint32
func mutexHog1(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) {
atomic.AddUint32(&Sink, 1)
for time.Since(start) < dt {
// When using gccgo the loop of mutex operations is
// not preemptible. This can cause the loop to block a GC,
// causing the time limits in TestDeltaProfile to fail.
// Since this loop is not very realistic, when using
// gccgo add preemption points 100 times a second.
t1 := time.Now()
for time.Since(start) < dt && time.Since(t1) < 10*time.Millisecond {
mu1.Lock()
mu2.Lock()
mu1.Unlock()
mu2.Unlock()
}
if runtime.Compiler == "gccgo" {
runtime.Gosched()
}
}
}
// mutexHog2 is almost identical to mutexHog1 but we keep them separate
// in order to distinguish them with function names in the stack trace.
// We make them slightly different, using Sink, because otherwise
// gccgo -c opt will merge them.
func mutexHog2(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) {
atomic.AddUint32(&Sink, 2)
for time.Since(start) < dt {
// See comment in mutexHog1.
t1 := time.Now()
for time.Since(start) < dt && time.Since(t1) < 10*time.Millisecond {
mu1.Lock()
mu2.Lock()
mu1.Unlock()
mu2.Unlock()
}
if runtime.Compiler == "gccgo" {
runtime.Gosched()
}
}
}
// mutexHog starts multiple goroutines that run the given hogger function for the specified duration.
// The hogger function will be given two mutexes to lock & unlock.
func mutexHog(duration time.Duration, hogger func(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration)) {
start := time.Now()
mu1 := new(sync.Mutex)
mu2 := new(sync.Mutex)
var wg sync.WaitGroup
wg.Add(10)
for i := 0; i < 10; i++ {
go func() {
defer wg.Done()
hogger(mu1, mu2, start, duration)
}()
}
wg.Wait()
}
func TestDeltaProfile(t *testing.T) {
rate := runtime.SetMutexProfileFraction(1)
defer func() {
runtime.SetMutexProfileFraction(rate)
}()
// mutexHog1 will appear in the non-delta mutex profile
// if mutex profiling works.
mutexHog(20*time.Millisecond, mutexHog1)
// If mutexHog1 does not appear in the mutex profile,
// skip this test. Mutex profiling is likely not working,
// and neither is the delta profile.
p, err := query("/debug/pprof/mutex")
if err != nil {
t.Skipf("mutex profile is unsupported: %v", err)
}
if !seen(p, "mutexHog1") {
t.Skipf("mutex profile is not working: %v", p)
}
// Run mutexHog2 in the background so that its call stacks appear in the mutex profile.
done := make(chan bool)
go func() {
for {
mutexHog(20*time.Millisecond, mutexHog2)
select {
case <-done:
done <- true
return
default:
time.Sleep(10 * time.Millisecond)
}
}
}()
defer func() { // clean up the above goroutine.
done <- true
<-done // wait for the goroutine to exit.
}()
for _, tc := range []struct {
endpoint string
seconds int
mutexHog1, mutexHog2 bool
}{
{"/debug/pprof/mutex?seconds=1", 1, false, true},
{"/debug/pprof/mutex", 0, true, true},
} {
t.Run(tc.endpoint, func(t *testing.T) {
p, err := query(tc.endpoint)
if err != nil {
t.Fatalf("failed to query profile: %v", err)
}
t.Logf("Profile=%v", p)
if got := seen(p, "mutexHog1"); got != tc.mutexHog1 {
t.Errorf("seen(mutexHog1) = %t, want %t", got, tc.mutexHog1)
}
if got := seen(p, "mutexHog2"); got != tc.mutexHog2 {
t.Errorf("seen(mutexHog2) = %t, want %t", got, tc.mutexHog2)
}
if tc.seconds > 0 {
got := time.Duration(p.DurationNanos) * time.Nanosecond
want := time.Duration(tc.seconds) * time.Second
if got < want/2 || got > 2*want {
t.Errorf("got duration = %v; want ~%v", got, want)
}
}
})
}
}
var srv = httptest.NewServer(nil)
func query(endpoint string) (*profile.Profile, error) {
url := srv.URL + endpoint
r, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("failed to fetch %q: %v", url, err)
}
if r.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to fetch %q: %v", url, r.Status)
}
b, err := ioutil.ReadAll(r.Body)
r.Body.Close()
if err != nil {
return nil, fmt.Errorf("failed to read the result from %q: %v", url, err)
}
return profile.Parse(bytes.NewBuffer(b))
}
// seen returns true if the profile includes samples whose stacks include
// the specified function name (fname).
func seen(p *profile.Profile, fname string) bool {
locIDs := map[*profile.Location]bool{}
for _, loc := range p.Location {
for _, l := range loc.Line {
if strings.Contains(l.Function.Name, fname) {
locIDs[loc] = true
break
}
}
}
for _, sample := range p.Sample {
for _, loc := range sample.Location {
if locIDs[loc] {
return true
}
}
}
return false
}

@@ -9,10 +9,10 @@ package pprof
import (
"bytes"
"fmt"
"internal/profile"
"reflect"
"regexp"
"runtime"
"runtime/pprof/internal/profile"
"testing"
"unsafe"
)

@@ -10,6 +10,7 @@ import (
"bytes"
"context"
"fmt"
"internal/profile"
"internal/testenv"
"io"
"io/ioutil"
@@ -18,7 +19,6 @@ import (
"os/exec"
"regexp"
"runtime"
"runtime/pprof/internal/profile"
"strings"
"sync"
"sync/atomic"

@@ -8,13 +8,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"internal/profile"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"reflect"
"runtime"
"runtime/pprof/internal/profile"
"strings"
"testing"
)

@@ -6,8 +6,8 @@ package pprof
import (
"bytes"
"internal/profile"
"runtime"
"runtime/pprof/internal/profile"
"testing"
)