// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"encoding/json"
	"fmt"
	"internal/trace"
	"log"
	"net/http"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"
)

func init() {
	http.HandleFunc("/trace", httpTrace)
	http.HandleFunc("/jsontrace", httpJsonTrace)
	http.HandleFunc("/trace_viewer_html", httpTraceViewerHTML)
}
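
// For orientation: GET /trace serves the viewer page built from templTrace
// below; that page imports /trace_viewer_html for the trace-viewer code and
// fetches its data from /jsontrace with the same query parameters.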

// httpTrace serves either the whole trace (goid==0) or the trace for the
// goid goroutine.
func httpTrace(w http.ResponseWriter, r *http.Request) {
	_, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	html := strings.Replace(templTrace, "{{PARAMS}}", r.Form.Encode(), -1)
	w.Write([]byte(html))
}

// See https://github.com/catapult-project/catapult/blob/master/tracing/docs/embedding-trace-viewer.md
// This is an almost verbatim copy of:
// https://github.com/catapult-project/catapult/blob/master/tracing/bin/index.html
// on revision 5f9e4c3eaa555bdef18218a89f38c768303b7b6e.
var templTrace = `
<html>
<head>
<link href="/trace_viewer_html" rel="import">
<style type="text/css">
  html, body {
    box-sizing: border-box;
    overflow: hidden;
    margin: 0px;
    padding: 0;
    width: 100%;
    height: 100%;
  }
  #trace-viewer {
    width: 100%;
    height: 100%;
  }
  #trace-viewer:focus {
    outline: none;
  }
</style>
<script>
'use strict';
(function() {
  var viewer;
  var url;
  var model;

  function load() {
    var req = new XMLHttpRequest();
    var is_binary = /[.]gz$/.test(url) || /[.]zip$/.test(url);
    req.overrideMimeType('text/plain; charset=x-user-defined');
    req.open('GET', url, true);
    if (is_binary)
      req.responseType = 'arraybuffer';

    req.onreadystatechange = function(event) {
      if (req.readyState !== 4)
        return;

      window.setTimeout(function() {
        if (req.status === 200)
          onResult(is_binary ? req.response : req.responseText);
        else
          onResultFail(req.status);
      }, 0);
    };
    req.send(null);
  }

  function onResultFail(err) {
    var overlay = new tr.ui.b.Overlay();
    overlay.textContent = err + ': ' + url + ' could not be loaded';
    overlay.title = 'Failed to fetch data';
    overlay.visible = true;
  }

  function onResult(result) {
    model = new tr.Model();
    var opts = new tr.importer.ImportOptions();
    opts.shiftWorldToZero = false;
    var i = new tr.importer.Import(model, opts);
    var p = i.importTracesWithProgressDialog([result]);
    p.then(onModelLoaded, onImportFail);
  }

  function onModelLoaded() {
    viewer.model = model;
    viewer.viewTitle = "trace";
  }

  function onImportFail(err) {
    var overlay = new tr.ui.b.Overlay();
    overlay.textContent = tr.b.normalizeException(err).message;
    overlay.title = 'Import error';
    overlay.visible = true;
  }

  document.addEventListener('DOMContentLoaded', function() {
    var container = document.createElement('track-view-container');
    container.id = 'track_view_container';

    viewer = document.createElement('tr-ui-timeline-view');
    viewer.track_view_container = container;
    viewer.appendChild(container);

    viewer.id = 'trace-viewer';
    viewer.globalMode = true;
    document.body.appendChild(viewer);

    url = '/jsontrace?{{PARAMS}}';
    load();
  });
}());
</script>
</head>
<body>
</body>
</html>
`

// httpTraceViewerHTML serves the static part of trace-viewer.
// This URL is queried from templTrace HTML.
func httpTraceViewerHTML(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "trace_viewer_lean.html"))
}

// httpJsonTrace serves the json trace, requested from within templTrace HTML.
func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
	// This is an AJAX handler, so instead of http.Error we use log.Printf to log errors.
	events, err := parseEvents()
	if err != nil {
		log.Printf("failed to parse trace: %v", err)
		return
	}

	params := &traceParams{
		events:  events,
		endTime: int64(1<<63 - 1),
	}

	if goids := r.FormValue("goid"); goids != "" {
		// If the goid argument is present, we are rendering a trace for this particular goroutine.
		goid, err := strconv.ParseUint(goids, 10, 64)
		if err != nil {
			log.Printf("failed to parse goid parameter '%v': %v", goids, err)
			return
		}
		analyzeGoroutines(events)
		g := gs[goid]
		params.gtrace = true
		params.startTime = g.StartTime
		params.endTime = g.EndTime
		params.maing = goid
		params.gs = trace.RelatedGoroutines(events, goid)
	}

	data, err := generateTrace(params)
	if err != nil {
		log.Printf("failed to generate trace: %v", err)
		return
	}

	if startStr, endStr := r.FormValue("start"), r.FormValue("end"); startStr != "" && endStr != "" {
		// If start/end arguments are present, we are rendering a range of the trace.
		start, err := strconv.ParseUint(startStr, 10, 64)
		if err != nil {
			log.Printf("failed to parse start parameter '%v': %v", startStr, err)
			return
		}
		end, err := strconv.ParseUint(endStr, 10, 64)
		if err != nil {
			log.Printf("failed to parse end parameter '%v': %v", endStr, err)
			return
		}
		if start >= uint64(len(data.Events)) || end <= start || end > uint64(len(data.Events)) {
			log.Printf("bogus start/end parameters: %v/%v, trace size %v", start, end, len(data.Events))
			return
		}
		data.Events = append(data.Events[start:end], data.Events[data.footer:]...)
	}
	err = json.NewEncoder(w).Encode(data)
	if err != nil {
		log.Printf("failed to serialize trace: %v", err)
		return
	}
}
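
// For illustration (values hypothetical): /jsontrace?goid=42 returns the
// per-goroutine view for goroutine 42, while /jsontrace?start=1000&end=5000
// returns Events[1000:5000] plus the footer metadata emitted by generateTrace.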

type Range struct {
	Name  string
	Start int
	End   int
}

// splitTrace splits the trace into a number of ranges, each resulting in
// approx 100MB of json output (trace viewer can hardly handle more).
func splitTrace(data ViewerData) []Range {
	const rangeSize = 100 << 20
	var ranges []Range
	cw := new(countingWriter)
	enc := json.NewEncoder(cw)
	// First calculate size of the mandatory part of the trace.
	// This includes stack traces and thread names.
	data1 := data
	data1.Events = data.Events[data.footer:]
	enc.Encode(data1)
	auxSize := cw.size
	cw.size = 0
	// Then calculate size of each individual event and group them into ranges.
	for i, start := 0, 0; i < data.footer; i++ {
		enc.Encode(data.Events[i])
		if cw.size+auxSize > rangeSize || i == data.footer-1 {
			ranges = append(ranges, Range{
				Name:  fmt.Sprintf("%v-%v", time.Duration(data.Events[start].Time*1000), time.Duration(data.Events[i].Time*1000)),
				Start: start,
				End:   i + 1,
			})
			start = i + 1
			cw.size = 0
		}
	}
	if len(ranges) == 1 {
		ranges = nil
	}
	return ranges
}
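
// exampleSplitRanges is an illustrative sketch, not used by the tool itself:
// it shows how splitTrace's output is meant to be consumed. Each Range names
// a window of Events that, together with the footer metadata, stays near
// rangeSize once JSON-encoded, and Start/End are the values a client would
// pass back as the start/end parameters of httpJsonTrace.
func exampleSplitRanges(data ViewerData) {
	for _, r := range splitTrace(data) {
		fmt.Printf("range %s: /jsontrace?start=%d&end=%d\n", r.Name, r.Start, r.End)
	}
}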

type countingWriter struct {
	size int
}

func (cw *countingWriter) Write(data []byte) (int, error) {
	cw.size += len(data)
	return len(data), nil
}
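
// A minimal usage sketch (ev is a hypothetical value): countingWriter measures
// how many bytes an encoding produces without buffering the output.
//
//	cw := new(countingWriter)
//	json.NewEncoder(cw).Encode(ev) // discard the bytes, count them
//	fmt.Println(cw.size)           // encoded size of ev in bytes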

type traceParams struct {
	events    []*trace.Event
	gtrace    bool
	startTime int64
	endTime   int64
	maing     uint64
	gs        map[uint64]bool
}

type traceContext struct {
	*traceParams
	data      ViewerData
	frameTree frameNode
	frameSeq  int
	arrowSeq  uint64
	gcount    uint64

	heapStats, prevHeapStats     heapStats
	threadStats, prevThreadStats threadStats
	gstates, prevGstates         [gStateCount]uint64
}

type heapStats struct {
	heapAlloc uint64
	nextGC    uint64
}

type threadStats struct {
	insyscall uint64
	prunning  uint64
}

type frameNode struct {
	id       int
	children map[uint64]frameNode
}

type gState int

const (
	gDead gState = iota
	gRunnable
	gRunning
	gWaiting
	gWaitingGC

	gStateCount
)

type gInfo struct {
	state      gState       // current state
	name       string       // name chosen for this goroutine at first EvGoStart
	start      *trace.Event // most recent EvGoStart
	markAssist *trace.Event // if non-nil, the mark assist currently running.
}

type ViewerData struct {
	Events   []*ViewerEvent         `json:"traceEvents"`
	Frames   map[string]ViewerFrame `json:"stackFrames"`
	TimeUnit string                 `json:"displayTimeUnit"`

	// This is where the mandatory part of the trace starts (e.g. thread names).
	footer int
}

type ViewerEvent struct {
	Name     string      `json:"name,omitempty"`
	Phase    string      `json:"ph"`
	Scope    string      `json:"s,omitempty"`
	Time     float64     `json:"ts"`
	Dur      float64     `json:"dur,omitempty"`
	Pid      uint64      `json:"pid"`
	Tid      uint64      `json:"tid"`
	ID       uint64      `json:"id,omitempty"`
	Stack    int         `json:"sf,omitempty"`
	EndStack int         `json:"esf,omitempty"`
	Arg      interface{} `json:"args,omitempty"`
}
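
// For reference (values illustrative), a complete slice event marshals to
// the trace-viewer JSON event format like:
//
//	{"name":"G7 main.worker","ph":"X","ts":10.5,"dur":2.5,"pid":0,"tid":7,"sf":3,"esf":9}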

type ViewerFrame struct {
	Name   string `json:"name"`
	Parent int    `json:"parent,omitempty"`
}

type NameArg struct {
	Name string `json:"name"`
}

type SortIndexArg struct {
	Index int `json:"sort_index"`
}

// generateTrace generates a json trace for trace-viewer:
// https://github.com/google/trace-viewer
// Trace format is described at:
// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/view
// If gtrace=true, generate the trace for goroutine goid, otherwise the whole trace.
// startTime, endTime determine the part of the trace that we are interested in.
// gs restricts the goroutines that are included in the resulting trace.
func generateTrace(params *traceParams) (ViewerData, error) {
	ctx := &traceContext{traceParams: params}
	ctx.frameTree.children = make(map[uint64]frameNode)
	ctx.data.Frames = make(map[string]ViewerFrame)
	ctx.data.TimeUnit = "ns"
	maxProc := 0
	ginfos := make(map[uint64]*gInfo)

	getGInfo := func(g uint64) *gInfo {
		info, ok := ginfos[g]
		if !ok {
			info = &gInfo{}
			ginfos[g] = info
		}
		return info
	}

	// Since we make many calls to setGState, we record a sticky
	// error in setGStateErr and check it after every event.
	var setGStateErr error
	setGState := func(ev *trace.Event, g uint64, oldState, newState gState) {
		info := getGInfo(g)
		if oldState == gWaiting && info.state == gWaitingGC {
			// For checking, gWaiting counts as any gWaiting*.
			oldState = info.state
		}
		if info.state != oldState && setGStateErr == nil {
			setGStateErr = fmt.Errorf("expected G %d to be in state %d, but got state %d", g, oldState, info.state)
		}
		ctx.gstates[info.state]--
		ctx.gstates[newState]++
		info.state = newState
	}
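
	// The loop below drives these transitions; for example (illustrative),
	// EvGoCreate moves a G dead -> runnable, EvGoStart runnable -> running,
	// and EvGoSched running -> runnable. Any mismatch with the recorded
	// state surfaces through setGStateErr.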
	for _, ev := range ctx.events {
		// Handle state transitions before we filter out events.
		switch ev.Type {
		case trace.EvGoStart, trace.EvGoStartLabel:
			setGState(ev, ev.G, gRunnable, gRunning)
			info := getGInfo(ev.G)
			if info.name == "" {
				if len(ev.Stk) > 0 {
					info.name = fmt.Sprintf("G%v %s", ev.G, ev.Stk[0].Fn)
				} else {
					info.name = fmt.Sprintf("G%v", ev.G)
				}
			}
			info.start = ev
		case trace.EvProcStart:
			ctx.threadStats.prunning++
		case trace.EvProcStop:
			ctx.threadStats.prunning--
		case trace.EvGoCreate:
			ctx.gcount++
			setGState(ev, ev.Args[0], gDead, gRunnable)
		case trace.EvGoEnd:
			ctx.gcount--
			setGState(ev, ev.G, gRunning, gDead)
		case trace.EvGoUnblock:
			setGState(ev, ev.Args[0], gWaiting, gRunnable)
		case trace.EvGoSysExit:
			setGState(ev, ev.G, gWaiting, gRunnable)
			ctx.threadStats.insyscall--
		case trace.EvGoSysBlock:
			setGState(ev, ev.G, gRunning, gWaiting)
			ctx.threadStats.insyscall++
		case trace.EvGoSched, trace.EvGoPreempt:
			setGState(ev, ev.G, gRunning, gRunnable)
		case trace.EvGoStop,
			trace.EvGoSleep, trace.EvGoBlock, trace.EvGoBlockSend, trace.EvGoBlockRecv,
			trace.EvGoBlockSelect, trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockNet:
			setGState(ev, ev.G, gRunning, gWaiting)
		case trace.EvGoBlockGC:
			setGState(ev, ev.G, gRunning, gWaitingGC)
		case trace.EvGCMarkAssistStart:
			getGInfo(ev.G).markAssist = ev
		case trace.EvGCMarkAssistDone:
			getGInfo(ev.G).markAssist = nil
		case trace.EvGoWaiting:
			setGState(ev, ev.G, gRunnable, gWaiting)
		case trace.EvGoInSyscall:
			// Cancel out the effect of EvGoCreate at the beginning.
			setGState(ev, ev.G, gRunnable, gWaiting)
			ctx.threadStats.insyscall++
		case trace.EvHeapAlloc:
			ctx.heapStats.heapAlloc = ev.Args[0]
		case trace.EvNextGC:
			ctx.heapStats.nextGC = ev.Args[0]
		}
		if setGStateErr != nil {
			return ctx.data, setGStateErr
		}

		// Ignore events that are from uninteresting goroutines
		// or outside of the interesting timeframe.
		if ctx.gs != nil && ev.P < trace.FakeP && !ctx.gs[ev.G] {
			continue
		}
		if ev.Ts < ctx.startTime || ev.Ts > ctx.endTime {
			continue
		}

		if ev.P < trace.FakeP && ev.P > maxProc {
			maxProc = ev.P
		}

		// Emit trace objects.
		switch ev.Type {
		case trace.EvProcStart:
			if ctx.gtrace {
				continue
			}
			ctx.emitInstant(ev, "proc start")
		case trace.EvProcStop:
			if ctx.gtrace {
				continue
			}
			ctx.emitInstant(ev, "proc stop")
		case trace.EvGCStart:
			ctx.emitSlice(ev, "GC")
		case trace.EvGCDone:
		case trace.EvGCScanStart:
			if ctx.gtrace {
				continue
			}
			ctx.emitSlice(ev, "MARK TERMINATION")
		case trace.EvGCScanDone:
		case trace.EvGCMarkAssistStart:
			// Mark assists can continue past preemptions, so truncate to the
			// end of the assist or the end of the goroutine's current run,
			// whichever comes first. We'll synthesize another slice if
			// necessary in EvGoStart.
			markFinish := ev.Link
			goFinish := getGInfo(ev.G).start.Link
			fakeMarkStart := *ev
			text := "MARK ASSIST"
			if markFinish == nil || markFinish.Ts > goFinish.Ts {
				fakeMarkStart.Link = goFinish
				text = "MARK ASSIST (unfinished)"
			}
			ctx.emitSlice(&fakeMarkStart, text)
		case trace.EvGCSweepStart:
			slice := ctx.emitSlice(ev, "SWEEP")
			if done := ev.Link; done != nil && done.Args[0] != 0 {
				slice.Arg = struct {
					Swept     uint64 `json:"Swept bytes"`
					Reclaimed uint64 `json:"Reclaimed bytes"`
				}{done.Args[0], done.Args[1]}
			}
		case trace.EvGoStart, trace.EvGoStartLabel:
			info := getGInfo(ev.G)
			if ev.Type == trace.EvGoStartLabel {
				ctx.emitSlice(ev, ev.SArgs[0])
			} else {
				ctx.emitSlice(ev, info.name)
			}
			if info.markAssist != nil {
				// If we're in a mark assist, synthesize a new slice, ending
				// either when the mark assist ends or when we're descheduled.
				markFinish := info.markAssist.Link
				goFinish := ev.Link
				fakeMarkStart := *ev
				text := "MARK ASSIST (resumed, unfinished)"
				if markFinish != nil && markFinish.Ts < goFinish.Ts {
					fakeMarkStart.Link = markFinish
					text = "MARK ASSIST (resumed)"
				}
				ctx.emitSlice(&fakeMarkStart, text)
			}
		case trace.EvGoCreate:
			ctx.emitArrow(ev, "go")
		case trace.EvGoUnblock:
			ctx.emitArrow(ev, "unblock")
		case trace.EvGoSysCall:
			ctx.emitInstant(ev, "syscall")
		case trace.EvGoSysExit:
			ctx.emitArrow(ev, "sysexit")
		}
		// Emit any counter updates.
		ctx.emitThreadCounters(ev)
		ctx.emitHeapCounters(ev)
		ctx.emitGoroutineCounters(ev)
	}

	ctx.data.footer = len(ctx.data.Events)
	ctx.emit(&ViewerEvent{Name: "process_name", Phase: "M", Pid: 0, Arg: &NameArg{"PROCS"}})
	ctx.emit(&ViewerEvent{Name: "process_sort_index", Phase: "M", Pid: 0, Arg: &SortIndexArg{1}})

	ctx.emit(&ViewerEvent{Name: "process_name", Phase: "M", Pid: 1, Arg: &NameArg{"STATS"}})
	ctx.emit(&ViewerEvent{Name: "process_sort_index", Phase: "M", Pid: 1, Arg: &SortIndexArg{0}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.GCP, Arg: &NameArg{"GC"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.GCP, Arg: &SortIndexArg{-6}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.NetpollP, Arg: &NameArg{"Network"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.NetpollP, Arg: &SortIndexArg{-5}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.TimerP, Arg: &NameArg{"Timers"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.TimerP, Arg: &SortIndexArg{-4}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.SyscallP, Arg: &NameArg{"Syscalls"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.SyscallP, Arg: &SortIndexArg{-3}})

	if !ctx.gtrace {
		for i := 0; i <= maxProc; i++ {
			ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: uint64(i), Arg: &NameArg{fmt.Sprintf("Proc %v", i)}})
			ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: uint64(i), Arg: &SortIndexArg{i}})
		}
	}

	if ctx.gtrace && ctx.gs != nil {
		for k, v := range ginfos {
			if !ctx.gs[k] {
				continue
			}
			ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: k, Arg: &NameArg{v.name}})
		}
		ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: ctx.maing, Arg: &SortIndexArg{-2}})
		ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: 0, Arg: &SortIndexArg{-1}})
	}

	return ctx.data, nil
}

func (ctx *traceContext) emit(e *ViewerEvent) {
	ctx.data.Events = append(ctx.data.Events, e)
}

func (ctx *traceContext) time(ev *trace.Event) float64 {
	// Trace viewer wants timestamps in microseconds.
	return float64(ev.Ts-ctx.startTime) / 1000
}
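
// e.g. (illustrative) an event 1500ns after ctx.startTime is reported to
// the viewer as ts=1.5, i.e. 1.5 microseconds.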

func (ctx *traceContext) proc(ev *trace.Event) uint64 {
	if ctx.gtrace && ev.P < trace.FakeP {
		return ev.G
	} else {
		return uint64(ev.P)
	}
}

func (ctx *traceContext) emitSlice(ev *trace.Event, name string) *ViewerEvent {
	sl := &ViewerEvent{
		Name:     name,
		Phase:    "X",
		Time:     ctx.time(ev),
		Dur:      ctx.time(ev.Link) - ctx.time(ev),
		Tid:      ctx.proc(ev),
		Stack:    ctx.stack(ev.Stk),
		EndStack: ctx.stack(ev.Link.Stk),
	}
	ctx.emit(sl)
	return sl
}

type heapCountersArg struct {
	Allocated uint64
	NextGC    uint64
}

func (ctx *traceContext) emitHeapCounters(ev *trace.Event) {
	if ctx.gtrace {
		return
	}
	if ctx.prevHeapStats == ctx.heapStats {
		return
	}
	diff := uint64(0)
	if ctx.heapStats.nextGC > ctx.heapStats.heapAlloc {
		diff = ctx.heapStats.nextGC - ctx.heapStats.heapAlloc
	}
	ctx.emit(&ViewerEvent{Name: "Heap", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}})
	ctx.prevHeapStats = ctx.heapStats
}

type goroutineCountersArg struct {
	Running   uint64
	Runnable  uint64
	GCWaiting uint64
}

func (ctx *traceContext) emitGoroutineCounters(ev *trace.Event) {
	if ctx.gtrace {
		return
	}
	if ctx.prevGstates == ctx.gstates {
		return
	}
	ctx.emit(&ViewerEvent{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &goroutineCountersArg{ctx.gstates[gRunning], ctx.gstates[gRunnable], ctx.gstates[gWaitingGC]}})
	ctx.prevGstates = ctx.gstates
}

type threadCountersArg struct {
	Running   uint64
	InSyscall uint64
}

func (ctx *traceContext) emitThreadCounters(ev *trace.Event) {
	if ctx.gtrace {
		return
	}
	if ctx.prevThreadStats == ctx.threadStats {
		return
	}
	ctx.emit(&ViewerEvent{Name: "Threads", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &threadCountersArg{ctx.threadStats.prunning, ctx.threadStats.insyscall}})
	ctx.prevThreadStats = ctx.threadStats
}

func (ctx *traceContext) emitInstant(ev *trace.Event, name string) {
	var arg interface{}
	if ev.Type == trace.EvProcStart {
		type Arg struct {
			ThreadID uint64
		}
		arg = &Arg{ev.Args[0]}
	}
	ctx.emit(&ViewerEvent{Name: name, Phase: "I", Scope: "t", Time: ctx.time(ev), Tid: ctx.proc(ev), Stack: ctx.stack(ev.Stk), Arg: arg})
}

func (ctx *traceContext) emitArrow(ev *trace.Event, name string) {
	if ev.Link == nil {
		// The other end of the arrow is not captured in the trace.
		// For example, a goroutine was unblocked but was not scheduled before trace stop.
		return
	}
	if ctx.gtrace && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) {
		return
	}

	if ev.P == trace.NetpollP || ev.P == trace.TimerP || ev.P == trace.SyscallP {
		// Trace-viewer discards arrows if they don't start/end inside of a slice or instant.
		// So emit a fake instant at the start of the arrow.
		ctx.emitInstant(&trace.Event{P: ev.P, Ts: ev.Ts}, "unblock")
	}

	ctx.arrowSeq++
	ctx.emit(&ViewerEvent{Name: name, Phase: "s", Tid: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.stack(ev.Stk)})
	ctx.emit(&ViewerEvent{Name: name, Phase: "t", Tid: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link)})
}
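
// Illustrative output of emitArrow: a flow is a pair of "s" (start) and "t"
// (end) events tied together by a shared id, e.g.
//
//	{"name":"unblock","ph":"s","ts":1.2,"pid":0,"tid":2,"id":17,"sf":3}
//	{"name":"unblock","ph":"t","ts":3.4,"pid":0,"tid":5,"id":17}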

func (ctx *traceContext) stack(stk []*trace.Frame) int {
	return ctx.buildBranch(ctx.frameTree, stk)
}

// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree.
func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
	if len(stk) == 0 {
		return parent.id
	}
	last := len(stk) - 1
	frame := stk[last]
	stk = stk[:last]

	node, ok := parent.children[frame.PC]
	if !ok {
		ctx.frameSeq++
		node.id = ctx.frameSeq
		node.children = make(map[uint64]frameNode)
		parent.children[frame.PC] = node
		ctx.data.Frames[strconv.Itoa(node.id)] = ViewerFrame{fmt.Sprintf("%v:%v", frame.Fn, frame.Line), parent.id}
	}
	return ctx.buildBranch(node, stk)
}
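
// Sketch of the sharing this achieves (frames illustrative): the stacks
// [f g main] and [h g main] both descend root -> main -> g, so those two
// nodes (and their ViewerFrame entries, linked via Parent) are created
// once; only the leaves f and h get fresh ids. Common stack suffixes are
// thus stored once in data.Frames.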