cmd/trace: revert internal/traceparser

The performance improvement is not as big as we hoped. Until the API is
feature complete, we postpone the release and avoid the added complexity.

This change was prepared by reverting all the changes that affected the
src/cmd/trace and src/internal/traceparser packages after
golang.org/cl/137635, and then bringing back the MMU computation APIs
(originally in src/internal/traceparser) to the src/internal/trace package.

Revert "cmd/trace: use new traceparser to parse the raw trace files"
This reverts https://golang.org/cl/145457 (commit 08816cb8d7).

Revert "internal/traceparser: provide parser that uses less space and
parses segments of runtime trace files"
This reverts https://golang.org/cl/137635 (commit daaf361f74).

Change-Id: Ic2a068a7dbaf4053cd9674ca7bde9c58e74385b4
Reviewed-on: https://go-review.googlesource.com/c/150517
Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
commit 6d5caf38e3 (parent 47df645473)
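For orientation, after this revert cmd/trace consumes the MMU computation API directly from internal/trace. The following is a minimal sketch of that flow, based on the parseTrace and getMMUCurve hunks below; the helper name, file path, and error handling are illustrative only.

package main

import (
	"bufio"
	"internal/trace" // importable only from within the standard library and cmd
	"os"
)

// mmuCurveFromFile sketches the restored flow: parse a raw trace file,
// compute mutator utilization, and build an MMU curve from it.
func mmuCurveFromFile(path string) (*trace.MMUCurve, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// trace.Parse returns a trace.ParseResult; its Events slice feeds the
	// package-level analysis helpers brought back by this change.
	res, err := trace.Parse(bufio.NewReader(f), "")
	if err != nil {
		return nil, err
	}
	util := trace.MutatorUtilization(res.Events, trace.UtilSTW|trace.UtilBackground|trace.UtilAssist)
	return trace.NewMMUCurve(util), nil
}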
@@ -157,9 +157,6 @@ go src=..
 			trace
 				testdata
 					+
-			traceparser
-				testdata
-					+
 		io
 			+
 		mime
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"fmt"
 	"html/template"
+	"internal/trace"
 	"log"
 	"math"
 	"net/http"
@@ -16,8 +17,6 @@ import (
 	"strconv"
 	"strings"
 	"time"
-
-	trace "internal/traceparser"
 )
 
 func init() {
@@ -309,7 +308,7 @@ func analyzeAnnotations() (annotationAnalysisResult, error) {
 		}
 	}
 	// combine region info.
-	analyzeGoroutines(res)
+	analyzeGoroutines(events)
 	for goid, stats := range gs {
 		// gs is a global var defined in goroutines.go as a result
 		// of analyzeGoroutines. TODO(hyangah): fix this not to depend
@@ -322,7 +321,7 @@ func analyzeAnnotations() (annotationAnalysisResult, error) {
 		}
 		var frame trace.Frame
 		if s.Start != nil {
-			frame = *res.Stacks[s.Start.StkID][0]
+			frame = *s.Start.Stk[0]
 		}
 		id := regionTypeID{Frame: frame, Type: s.Name}
 		regions[id] = append(regions[id], regionDesc{UserRegionDesc: s, G: goid})
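The two hunks above show the central data-model difference this revert restores: with internal/trace each Event carries its stack inline in its Stk field, instead of being looked up through a shared Stacks map keyed by StkID. A small sketch of reading a frame that way (the helper name is illustrative):

package main

import (
	"fmt"
	"internal/trace"
)

// topFrame reports the innermost frame of an event's inline stack.
func topFrame(ev *trace.Event) string {
	if len(ev.Stk) == 0 {
		return "unknown"
	}
	f := ev.Stk[0] // innermost *trace.Frame: Fn, File, Line, PC
	return fmt.Sprintf("%s:%d", f.Fn, f.Line)
}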
@@ -11,7 +11,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"internal/traceparser"
+	traceparser "internal/trace"
 	"io/ioutil"
 	"reflect"
 	"runtime/debug"
@@ -338,8 +338,10 @@ func traceProgram(t *testing.T, f func(), name string) error {
 	trace.Stop()
 
 	saveTrace(buf, name)
-	res, err := traceparser.ParseBuffer(buf)
-	if err != nil {
+	res, err := traceparser.Parse(buf, name+".faketrace")
+	if err == traceparser.ErrTimeOrder {
+		t.Skipf("skipping due to golang.org/issue/16755: %v", err)
+	} else if err != nil {
 		return err
 	}
 
@@ -368,15 +370,15 @@ func childrenNames(task *taskDesc) (ret []string) {
 	return ret
 }
 
-func swapLoaderData(res *traceparser.Parsed, err error) {
+func swapLoaderData(res traceparser.ParseResult, err error) {
 	// swap loader's data.
 	parseTrace() // fool loader.once.
 
 	loader.res = res
 	loader.err = err
 
-	analyzeGoroutines(res) // fool gsInit once.
-	gs = res.GoroutineStats()
+	analyzeGoroutines(nil) // fool gsInit once.
+	gs = traceparser.GoroutineStats(res.Events)
 
 }
 
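The traceProgram hunk above restores the usual test idiom around internal/trace: parse the captured buffer and skip, rather than fail, when the clock-skew error ErrTimeOrder is reported. As a standalone sketch (the helper name and the empty binary argument are illustrative):

package main

import (
	"bytes"
	"testing"

	traceparser "internal/trace"
)

// parseOrSkip parses a captured trace and skips the test on the known
// timestamp-ordering problem (golang.org/issue/16755).
func parseOrSkip(t *testing.T, buf *bytes.Buffer) traceparser.ParseResult {
	t.Helper()
	res, err := traceparser.Parse(buf, "")
	if err == traceparser.ErrTimeOrder {
		t.Skipf("skipping due to golang.org/issue/16755: %v", err)
	} else if err != nil {
		t.Fatalf("failed to parse trace: %v", err)
	}
	return res
}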
@@ -9,6 +9,7 @@ package main
 import (
 	"fmt"
 	"html/template"
+	"internal/trace"
 	"log"
 	"net/http"
 	"reflect"
@@ -16,8 +17,6 @@ import (
 	"strconv"
 	"sync"
 	"time"
-
-	trace "internal/traceparser"
 )
 
 func init() {
@@ -39,15 +38,15 @@ var (
 )
 
 // analyzeGoroutines generates statistics about execution of all goroutines and stores them in gs.
-func analyzeGoroutines(res *trace.Parsed) {
+func analyzeGoroutines(events []*trace.Event) {
 	gsInit.Do(func() {
-		gs = res.GoroutineStats()
+		gs = trace.GoroutineStats(events)
 	})
 }
 
 // httpGoroutines serves list of goroutine groups.
 func httpGoroutines(w http.ResponseWriter, r *http.Request) {
-	events, err := parseTrace()
+	events, err := parseEvents()
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
@@ -90,7 +89,7 @@ Goroutines: <br>
 func httpGoroutine(w http.ResponseWriter, r *http.Request) {
 	// TODO(hyangah): support format=csv (raw data)
 
-	events, err := parseTrace()
+	events, err := parseEvents()
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
@ -5,12 +5,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"bufio"
|
||||
"cmd/internal/browser"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html/template"
|
||||
trace "internal/traceparser"
|
||||
"internal/trace"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
@ -115,22 +115,8 @@ func main() {
|
||||
dief("%v\n", err)
|
||||
}
|
||||
|
||||
if *debugFlag { // match go tool trace -d (except for Offset and Seq)
|
||||
f := func(ev *trace.Event) {
|
||||
desc := trace.EventDescriptions[ev.Type]
|
||||
w := new(bytes.Buffer)
|
||||
fmt.Fprintf(w, "%v %v p=%v g=%v", ev.Ts, desc.Name, ev.P, ev.G)
|
||||
for i, a := range desc.Args {
|
||||
fmt.Fprintf(w, " %v=%v", a, ev.Args[i])
|
||||
}
|
||||
for i, a := range desc.SArgs {
|
||||
fmt.Fprintf(w, " %v=%v", a, ev.SArgs[i])
|
||||
}
|
||||
fmt.Println(w.String())
|
||||
}
|
||||
for i := 0; i < len(res.Events); i++ {
|
||||
f(res.Events[i])
|
||||
}
|
||||
if *debugFlag {
|
||||
trace.Print(res.Events)
|
||||
os.Exit(0)
|
||||
}
|
||||
reportMemoryUsage("after parsing trace")
|
||||
@ -155,23 +141,36 @@ var ranges []Range
|
||||
|
||||
var loader struct {
|
||||
once sync.Once
|
||||
res *trace.Parsed
|
||||
res trace.ParseResult
|
||||
err error
|
||||
}
|
||||
|
||||
func parseTrace() (*trace.Parsed, error) {
|
||||
// parseEvents is a compatibility wrapper that returns only
|
||||
// the Events part of trace.ParseResult returned by parseTrace.
|
||||
func parseEvents() ([]*trace.Event, error) {
|
||||
res, err := parseTrace()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Events, err
|
||||
}
|
||||
|
||||
func parseTrace() (trace.ParseResult, error) {
|
||||
loader.once.Do(func() {
|
||||
x, err := trace.New(traceFile)
|
||||
tracef, err := os.Open(traceFile)
|
||||
if err != nil {
|
||||
loader.err = err
|
||||
loader.err = fmt.Errorf("failed to open trace file: %v", err)
|
||||
return
|
||||
}
|
||||
err = x.Parse(0, x.MaxTs, nil)
|
||||
defer tracef.Close()
|
||||
|
||||
// Parse and symbolize.
|
||||
res, err := trace.Parse(bufio.NewReader(tracef), programBinary)
|
||||
if err != nil {
|
||||
loader.err = err
|
||||
loader.err = fmt.Errorf("failed to parse trace: %v", err)
|
||||
return
|
||||
}
|
||||
loader.res = x
|
||||
loader.res = res
|
||||
})
|
||||
return loader.res, loader.err
|
||||
}
|
||||
|
@@ -28,7 +28,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
-	trace "internal/traceparser"
+	"internal/trace"
 	"log"
 	"math"
 	"net/http"
@@ -83,11 +83,11 @@ func getMMUCurve(r *http.Request) ([][]trace.MutatorUtil, *trace.MMUCurve, error
 	mmuCache.lock.Unlock()
 
 	c.init.Do(func() {
-		tr, err := parseTrace()
+		events, err := parseEvents()
 		if err != nil {
 			c.err = err
 		} else {
-			c.util = tr.MutatorUtilization(flags)
+			c.util = trace.MutatorUtilization(events, flags)
 			c.mmuCurve = trace.NewMMUCurve(c.util)
 		}
 	})
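Once getMMUCurve has built the curve, the handlers query it per window size. A sketch of that query, assuming the MMUCurve.MMU method of internal/trace; the window sizes and helper name are illustrative:

package main

import (
	"fmt"
	"internal/trace"
	"time"
)

// printMMU reports minimum mutator utilization for a few window sizes.
func printMMU(c *trace.MMUCurve) {
	for _, window := range []time.Duration{time.Millisecond, 10 * time.Millisecond, 100 * time.Millisecond} {
		fmt.Printf("MMU(%v) = %.3f\n", window, c.MMU(window))
	}
}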
@ -9,6 +9,7 @@ package main
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"internal/trace"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@ -20,8 +21,6 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
trace "internal/traceparser"
|
||||
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
@ -61,22 +60,22 @@ type interval struct {
|
||||
begin, end int64 // nanoseconds.
|
||||
}
|
||||
|
||||
func pprofByGoroutine(compute func(io.Writer, map[uint64][]interval, *trace.Parsed) error) func(w io.Writer, r *http.Request) error {
|
||||
func pprofByGoroutine(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error {
|
||||
return func(w io.Writer, r *http.Request) error {
|
||||
id := r.FormValue("id")
|
||||
res, err := parseTrace()
|
||||
events, err := parseEvents()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gToIntervals, err := pprofMatchingGoroutines(id, res)
|
||||
gToIntervals, err := pprofMatchingGoroutines(id, events)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return compute(w, gToIntervals, res)
|
||||
return compute(w, gToIntervals, events)
|
||||
}
|
||||
}
|
||||
|
||||
func pprofByRegion(compute func(io.Writer, map[uint64][]interval, *trace.Parsed) error) func(w io.Writer, r *http.Request) error {
|
||||
func pprofByRegion(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error {
|
||||
return func(w io.Writer, r *http.Request) error {
|
||||
filter, err := newRegionFilter(r)
|
||||
if err != nil {
|
||||
@ -86,7 +85,7 @@ func pprofByRegion(compute func(io.Writer, map[uint64][]interval, *trace.Parsed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
events, _ := parseTrace()
|
||||
events, _ := parseEvents()
|
||||
|
||||
return compute(w, gToIntervals, events)
|
||||
}
|
||||
@ -95,7 +94,7 @@ func pprofByRegion(compute func(io.Writer, map[uint64][]interval, *trace.Parsed)
|
||||
// pprofMatchingGoroutines parses the goroutine type id string (i.e. pc)
|
||||
// and returns the ids of goroutines of the matching type and its interval.
|
||||
// If the id string is empty, returns nil without an error.
|
||||
func pprofMatchingGoroutines(id string, p *trace.Parsed) (map[uint64][]interval, error) {
|
||||
func pprofMatchingGoroutines(id string, events []*trace.Event) (map[uint64][]interval, error) {
|
||||
if id == "" {
|
||||
return nil, nil
|
||||
}
|
||||
@ -103,7 +102,7 @@ func pprofMatchingGoroutines(id string, p *trace.Parsed) (map[uint64][]interval,
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid goroutine type: %v", id)
|
||||
}
|
||||
analyzeGoroutines(p)
|
||||
analyzeGoroutines(events)
|
||||
var res map[uint64][]interval
|
||||
for _, g := range gs {
|
||||
if g.PC != pc {
|
||||
@ -172,25 +171,17 @@ func pprofMatchingRegions(filter *regionFilter) (map[uint64][]interval, error) {
|
||||
return gToIntervals, nil
|
||||
}
|
||||
|
||||
func stklen(p *trace.Parsed, ev *trace.Event) int {
|
||||
if ev.StkID == 0 {
|
||||
return 0
|
||||
}
|
||||
return len(p.Stacks[ev.StkID])
|
||||
}
|
||||
|
||||
// computePprofIO generates IO pprof-like profile (time spent in IO wait, currently only network blocking event).
|
||||
func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
|
||||
events := res.Events
|
||||
prof := make(map[uint32]Record)
|
||||
func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
|
||||
prof := make(map[uint64]Record)
|
||||
for _, ev := range events {
|
||||
if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
|
||||
if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
|
||||
continue
|
||||
}
|
||||
overlapping := pprofOverlappingDuration(gToIntervals, ev)
|
||||
if overlapping > 0 {
|
||||
rec := prof[ev.StkID]
|
||||
rec.stk = res.Stacks[ev.StkID]
|
||||
rec.stk = ev.Stk
|
||||
rec.n++
|
||||
rec.time += overlapping.Nanoseconds()
|
||||
prof[ev.StkID] = rec
|
||||
@ -200,9 +191,8 @@ func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, res *trace.
|
||||
}
|
||||
|
||||
// computePprofBlock generates blocking pprof-like profile (time spent blocked on synchronization primitives).
|
||||
func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
|
||||
events := res.Events
|
||||
prof := make(map[uint32]Record)
|
||||
func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
|
||||
prof := make(map[uint64]Record)
|
||||
for _, ev := range events {
|
||||
switch ev.Type {
|
||||
case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect,
|
||||
@ -213,13 +203,13 @@ func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, res *tra
|
||||
default:
|
||||
continue
|
||||
}
|
||||
if ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
|
||||
if ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
|
||||
continue
|
||||
}
|
||||
overlapping := pprofOverlappingDuration(gToIntervals, ev)
|
||||
if overlapping > 0 {
|
||||
rec := prof[ev.StkID]
|
||||
rec.stk = res.Stacks[ev.StkID]
|
||||
rec.stk = ev.Stk
|
||||
rec.n++
|
||||
rec.time += overlapping.Nanoseconds()
|
||||
prof[ev.StkID] = rec
|
||||
@ -229,17 +219,16 @@ func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, res *tra
|
||||
}
|
||||
|
||||
// computePprofSyscall generates syscall pprof-like profile (time spent blocked in syscalls).
|
||||
func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
|
||||
events := res.Events
|
||||
prof := make(map[uint32]Record)
|
||||
func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
|
||||
prof := make(map[uint64]Record)
|
||||
for _, ev := range events {
|
||||
if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
|
||||
if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
|
||||
continue
|
||||
}
|
||||
overlapping := pprofOverlappingDuration(gToIntervals, ev)
|
||||
if overlapping > 0 {
|
||||
rec := prof[ev.StkID]
|
||||
rec.stk = res.Stacks[ev.StkID]
|
||||
rec.stk = ev.Stk
|
||||
rec.n++
|
||||
rec.time += overlapping.Nanoseconds()
|
||||
prof[ev.StkID] = rec
|
||||
@ -250,18 +239,17 @@ func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, res *t
|
||||
|
||||
// computePprofSched generates scheduler latency pprof-like profile
|
||||
// (time between a goroutine become runnable and actually scheduled for execution).
|
||||
func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, res *trace.Parsed) error {
|
||||
events := res.Events
|
||||
prof := make(map[uint32]Record)
|
||||
func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error {
|
||||
prof := make(map[uint64]Record)
|
||||
for _, ev := range events {
|
||||
if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) ||
|
||||
ev.Link == nil || ev.StkID == 0 || stklen(res, ev) == 0 {
|
||||
ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
|
||||
continue
|
||||
}
|
||||
overlapping := pprofOverlappingDuration(gToIntervals, ev)
|
||||
if overlapping > 0 {
|
||||
rec := prof[ev.StkID]
|
||||
rec.stk = res.Stacks[ev.StkID]
|
||||
rec.stk = ev.Stk
|
||||
rec.n++
|
||||
rec.time += overlapping.Nanoseconds()
|
||||
prof[ev.StkID] = rec
|
||||
@ -339,7 +327,7 @@ func serveSVGProfile(prof func(w io.Writer, r *http.Request) error) http.Handler
|
||||
}
|
||||
}
|
||||
|
||||
func buildProfile(prof map[uint32]Record) *profile.Profile {
|
||||
func buildProfile(prof map[uint64]Record) *profile.Profile {
|
||||
p := &profile.Profile{
|
||||
PeriodType: &profile.ValueType{Type: "trace", Unit: "count"},
|
||||
Period: 1,
|
||||
|
@ -7,7 +7,7 @@ package main
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
trace "internal/traceparser"
|
||||
"internal/trace"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
@ -23,7 +23,7 @@ import (
|
||||
|
||||
func init() {
|
||||
http.HandleFunc("/trace", httpTrace)
|
||||
http.HandleFunc("/jsontrace", httpJSONTrace)
|
||||
http.HandleFunc("/jsontrace", httpJsonTrace)
|
||||
http.HandleFunc("/trace_viewer_html", httpTraceViewerHTML)
|
||||
}
|
||||
|
||||
@ -38,7 +38,7 @@ func httpTrace(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
html := strings.Replace(templTrace, "{{PARAMS}}", r.Form.Encode(), -1)
|
||||
html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode())
|
||||
w.Write([]byte(html))
|
||||
|
||||
}
|
||||
@ -165,8 +165,8 @@ func httpTraceViewerHTML(w http.ResponseWriter, r *http.Request) {
|
||||
http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "trace_viewer_full.html"))
|
||||
}
|
||||
|
||||
// httpJSONTrace serves json trace, requested from within templTrace HTML.
|
||||
func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
|
||||
// httpJsonTrace serves json trace, requested from within templTrace HTML.
|
||||
func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
|
||||
defer debug.FreeOSMemory()
|
||||
defer reportMemoryUsage("after httpJsonTrace")
|
||||
// This is an AJAX handler, so instead of http.Error we use log.Printf to log errors.
|
||||
@ -188,7 +188,7 @@ func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
|
||||
log.Printf("failed to parse goid parameter %q: %v", goids, err)
|
||||
return
|
||||
}
|
||||
analyzeGoroutines(res)
|
||||
analyzeGoroutines(res.Events)
|
||||
g, ok := gs[goid]
|
||||
if !ok {
|
||||
log.Printf("failed to find goroutine %d", goid)
|
||||
@ -202,7 +202,7 @@ func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
|
||||
params.endTime = lastTimestamp()
|
||||
}
|
||||
params.maing = goid
|
||||
params.gs = res.RelatedGoroutines(goid)
|
||||
params.gs = trace.RelatedGoroutines(res.Events, goid)
|
||||
} else if taskids := r.FormValue("taskid"); taskids != "" {
|
||||
taskid, err := strconv.ParseUint(taskids, 10, 64)
|
||||
if err != nil {
|
||||
@ -264,13 +264,12 @@ func httpJSONTrace(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
c := viewerDataTraceConsumer(w, start, end)
|
||||
if err := generateTrace(res, params, c); err != nil {
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
log.Printf("failed to generate trace: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Range is a named range
|
||||
type Range struct {
|
||||
Name string
|
||||
Start int
|
||||
@ -286,13 +285,13 @@ func (r Range) URL() string {
|
||||
// splitTrace splits the trace into a number of ranges,
|
||||
// each resulting in approx 100MB of json output
|
||||
// (trace viewer can hardly handle more).
|
||||
func splitTrace(res *trace.Parsed) []Range {
|
||||
func splitTrace(res trace.ParseResult) []Range {
|
||||
params := &traceParams{
|
||||
parsed: res,
|
||||
endTime: math.MaxInt64,
|
||||
}
|
||||
s, c := splittingTraceConsumer(100 << 20) // 100M
|
||||
if err := generateTrace(res, params, c); err != nil {
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
dief("%v\n", err)
|
||||
}
|
||||
return s.Ranges
|
||||
@ -309,7 +308,7 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
|
||||
}
|
||||
|
||||
var (
|
||||
data = viewerData{Frames: make(map[string]viewerFrame)}
|
||||
data = ViewerData{Frames: make(map[string]ViewerFrame)}
|
||||
|
||||
sizes []eventSz
|
||||
cw countingWriter
|
||||
@ -321,7 +320,7 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
|
||||
consumeTimeUnit: func(unit string) {
|
||||
data.TimeUnit = unit
|
||||
},
|
||||
consumeViewerEvent: func(v *viewerEvent, required bool) {
|
||||
consumeViewerEvent: func(v *ViewerEvent, required bool) {
|
||||
if required {
|
||||
// Store required events inside data
|
||||
// so flush can include them in the required
|
||||
@ -334,7 +333,7 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) {
|
||||
sizes = append(sizes, eventSz{v.Time, cw.size + 1}) // +1 for ",".
|
||||
cw.size = 0
|
||||
},
|
||||
consumeViewerFrame: func(k string, v viewerFrame) {
|
||||
consumeViewerFrame: func(k string, v ViewerFrame) {
|
||||
data.Frames[k] = v
|
||||
},
|
||||
flush: func() {
|
||||
@ -395,7 +394,7 @@ func (cw *countingWriter) Write(data []byte) (int, error) {
|
||||
}
|
||||
|
||||
type traceParams struct {
|
||||
parsed *trace.Parsed
|
||||
parsed trace.ParseResult
|
||||
mode traceviewMode
|
||||
startTime int64
|
||||
endTime int64
|
||||
@ -412,7 +411,6 @@ const (
|
||||
)
|
||||
|
||||
type traceContext struct {
|
||||
res *trace.Parsed
|
||||
*traceParams
|
||||
consumer traceConsumer
|
||||
frameTree frameNode
|
||||
@ -463,16 +461,16 @@ type gInfo struct {
|
||||
markAssist *trace.Event // if non-nil, the mark assist currently running.
|
||||
}
|
||||
|
||||
type viewerData struct {
|
||||
Events []*viewerEvent `json:"traceEvents"`
|
||||
Frames map[string]viewerFrame `json:"stackFrames"`
|
||||
type ViewerData struct {
|
||||
Events []*ViewerEvent `json:"traceEvents"`
|
||||
Frames map[string]ViewerFrame `json:"stackFrames"`
|
||||
TimeUnit string `json:"displayTimeUnit"`
|
||||
|
||||
// This is where mandatory part of the trace starts (e.g. thread names)
|
||||
footer int
|
||||
}
|
||||
|
||||
type viewerEvent struct {
|
||||
type ViewerEvent struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Phase string `json:"ph"`
|
||||
Scope string `json:"s,omitempty"`
|
||||
@ -488,33 +486,33 @@ type viewerEvent struct {
|
||||
Category string `json:"cat,omitempty"`
|
||||
}
|
||||
|
||||
type viewerFrame struct {
|
||||
type ViewerFrame struct {
|
||||
Name string `json:"name"`
|
||||
Parent int `json:"parent,omitempty"`
|
||||
}
|
||||
|
||||
type nameArg struct {
|
||||
type NameArg struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type taskArg struct {
|
||||
type TaskArg struct {
|
||||
ID uint64 `json:"id"`
|
||||
StartG uint64 `json:"start_g,omitempty"`
|
||||
EndG uint64 `json:"end_g,omitempty"`
|
||||
}
|
||||
|
||||
type regionArg struct {
|
||||
type RegionArg struct {
|
||||
TaskID uint64 `json:"taskid,omitempty"`
|
||||
}
|
||||
|
||||
type sortIndexArg struct {
|
||||
type SortIndexArg struct {
|
||||
Index int `json:"sort_index"`
|
||||
}
|
||||
|
||||
type traceConsumer struct {
|
||||
consumeTimeUnit func(unit string)
|
||||
consumeViewerEvent func(v *viewerEvent, required bool)
|
||||
consumeViewerFrame func(key string, f viewerFrame)
|
||||
consumeViewerEvent func(v *ViewerEvent, required bool)
|
||||
consumeViewerFrame func(key string, f ViewerFrame)
|
||||
flush func()
|
||||
}
|
||||
|
||||
@ -531,15 +529,15 @@ const (
|
||||
// If mode==goroutineMode, generate trace for goroutine goid, otherwise whole trace.
|
||||
// startTime, endTime determine part of the trace that we are interested in.
|
||||
// gset restricts goroutines that are included in the resulting trace.
|
||||
func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsumer) error {
|
||||
func generateTrace(params *traceParams, consumer traceConsumer) error {
|
||||
defer consumer.flush()
|
||||
|
||||
ctx := &traceContext{res: res, traceParams: params}
|
||||
ctx := &traceContext{traceParams: params}
|
||||
ctx.frameTree.children = make(map[uint64]frameNode)
|
||||
ctx.consumer = consumer
|
||||
|
||||
ctx.consumer.consumeTimeUnit("ns")
|
||||
maxProc := int32(0)
|
||||
maxProc := 0
|
||||
ginfos := make(map[uint64]*gInfo)
|
||||
stacks := params.parsed.Stacks
|
||||
|
||||
@ -584,12 +582,12 @@ func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsume
|
||||
newG := ev.Args[0]
|
||||
info := getGInfo(newG)
|
||||
if info.name != "" {
|
||||
return fmt.Errorf("duplicate go create event for go id=%d detected at time %d", newG, ev.Ts)
|
||||
return fmt.Errorf("duplicate go create event for go id=%d detected at offset %d", newG, ev.Off)
|
||||
}
|
||||
|
||||
stk, ok := stacks[uint32(ev.Args[1])]
|
||||
stk, ok := stacks[ev.Args[1]]
|
||||
if !ok || len(stk) == 0 {
|
||||
return fmt.Errorf("invalid go create event: missing stack information for go id=%d at time %d", newG, ev.Ts)
|
||||
return fmt.Errorf("invalid go create event: missing stack information for go id=%d at offset %d", newG, ev.Off)
|
||||
}
|
||||
|
||||
fname := stk[0].Fn
|
||||
@ -760,23 +758,23 @@ func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsume
|
||||
ctx.emitSectionFooter(procsSection, "PROCS", 2)
|
||||
}
|
||||
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &nameArg{"GC"}})
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &sortIndexArg{-6}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &NameArg{"GC"}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.GCP, Arg: &SortIndexArg{-6}})
|
||||
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &nameArg{"Network"}})
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &sortIndexArg{-5}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &NameArg{"Network"}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.NetpollP, Arg: &SortIndexArg{-5}})
|
||||
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &nameArg{"Timers"}})
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &sortIndexArg{-4}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &NameArg{"Timers"}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.TimerP, Arg: &SortIndexArg{-4}})
|
||||
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &nameArg{"Syscalls"}})
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &sortIndexArg{-3}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &NameArg{"Syscalls"}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: trace.SyscallP, Arg: &SortIndexArg{-3}})
|
||||
|
||||
// Display rows for Ps if we are in the default trace view mode (not goroutine-oriented presentation)
|
||||
if ctx.mode&modeGoroutineOriented == 0 {
|
||||
for i := 0; i <= int(maxProc); i++ {
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &nameArg{fmt.Sprintf("Proc %v", i)}})
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &sortIndexArg{i}})
|
||||
for i := 0; i <= maxProc; i++ {
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &NameArg{fmt.Sprintf("Proc %v", i)}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: uint64(i), Arg: &SortIndexArg{i}})
|
||||
}
|
||||
}
|
||||
|
||||
@ -814,27 +812,27 @@ func generateTrace(res *trace.Parsed, params *traceParams, consumer traceConsume
|
||||
if !ctx.gs[k] {
|
||||
continue
|
||||
}
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: k, Arg: &nameArg{v.name}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: procsSection, Tid: k, Arg: &NameArg{v.name}})
|
||||
}
|
||||
// Row for the main goroutine (maing)
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: ctx.maing, Arg: &sortIndexArg{-2}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: ctx.maing, Arg: &SortIndexArg{-2}})
|
||||
// Row for GC or global state (specified with G=0)
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: 0, Arg: &sortIndexArg{-1}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: procsSection, Tid: 0, Arg: &SortIndexArg{-1}})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *traceContext) emit(e *viewerEvent) {
|
||||
func (ctx *traceContext) emit(e *ViewerEvent) {
|
||||
ctx.consumer.consumeViewerEvent(e, false)
|
||||
}
|
||||
|
||||
func (ctx *traceContext) emitFooter(e *viewerEvent) {
|
||||
func (ctx *traceContext) emitFooter(e *ViewerEvent) {
|
||||
ctx.consumer.consumeViewerEvent(e, true)
|
||||
}
|
||||
func (ctx *traceContext) emitSectionFooter(sectionID uint64, name string, priority int) {
|
||||
ctx.emitFooter(&viewerEvent{Name: "process_name", Phase: "M", Pid: sectionID, Arg: &nameArg{name}})
|
||||
ctx.emitFooter(&viewerEvent{Name: "process_sort_index", Phase: "M", Pid: sectionID, Arg: &sortIndexArg{priority}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "process_name", Phase: "M", Pid: sectionID, Arg: &NameArg{name}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "process_sort_index", Phase: "M", Pid: sectionID, Arg: &SortIndexArg{priority}})
|
||||
}
|
||||
|
||||
func (ctx *traceContext) time(ev *trace.Event) float64 {
|
||||
@ -856,30 +854,31 @@ func tsWithinRange(ts, s, e int64) bool {
|
||||
func (ctx *traceContext) proc(ev *trace.Event) uint64 {
|
||||
if ctx.mode&modeGoroutineOriented != 0 && ev.P < trace.FakeP {
|
||||
return ev.G
|
||||
} else {
|
||||
return uint64(ev.P)
|
||||
}
|
||||
return uint64(ev.P)
|
||||
}
|
||||
|
||||
func (ctx *traceContext) emitSlice(ev *trace.Event, name string) {
|
||||
ctx.emit(ctx.makeSlice(ev, name))
|
||||
}
|
||||
|
||||
func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *viewerEvent {
|
||||
// If viewerEvent.Dur is not a positive value,
|
||||
func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *ViewerEvent {
|
||||
// If ViewerEvent.Dur is not a positive value,
|
||||
// trace viewer handles it as a non-terminating time interval.
|
||||
// Avoid it by setting the field with a small value.
|
||||
durationUsec := ctx.time(ev.Link) - ctx.time(ev)
|
||||
if ev.Link == nil || ev.Link.Ts-ev.Ts <= 0 {
|
||||
if ev.Link.Ts-ev.Ts <= 0 {
|
||||
durationUsec = 0.0001 // 0.1 nanoseconds
|
||||
}
|
||||
sl := &viewerEvent{
|
||||
sl := &ViewerEvent{
|
||||
Name: name,
|
||||
Phase: "X",
|
||||
Time: ctx.time(ev),
|
||||
Dur: durationUsec,
|
||||
Tid: ctx.proc(ev),
|
||||
Stack: ctx.stack(ctx.res.Stacks[ev.StkID]),
|
||||
EndStack: ctx.stack(ctx.res.Stacks[ev.Link.StkID]),
|
||||
Stack: ctx.stack(ev.Stk),
|
||||
EndStack: ctx.stack(ev.Link.Stk),
|
||||
}
|
||||
|
||||
// grey out non-overlapping events if the event is not a global event (ev.G == 0)
|
||||
@ -889,7 +888,7 @@ func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *viewerEvent {
|
||||
type Arg struct {
|
||||
P int
|
||||
}
|
||||
sl.Arg = &Arg{P: int(ev.P)}
|
||||
sl.Arg = &Arg{P: ev.P}
|
||||
}
|
||||
// grey out non-overlapping events.
|
||||
overlapping := false
|
||||
@ -911,10 +910,10 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) {
|
||||
taskName := task.name
|
||||
durationUsec := float64(task.lastTimestamp()-task.firstTimestamp()) / 1e3
|
||||
|
||||
ctx.emitFooter(&viewerEvent{Name: "thread_name", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &nameArg{fmt.Sprintf("T%d %s", task.id, taskName)}})
|
||||
ctx.emit(&viewerEvent{Name: "thread_sort_index", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &sortIndexArg{sortIndex}})
|
||||
ctx.emitFooter(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &NameArg{fmt.Sprintf("T%d %s", task.id, taskName)}})
|
||||
ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: tasksSection, Tid: taskRow, Arg: &SortIndexArg{sortIndex}})
|
||||
ts := float64(task.firstTimestamp()) / 1e3
|
||||
sl := &viewerEvent{
|
||||
sl := &ViewerEvent{
|
||||
Name: taskName,
|
||||
Phase: "X",
|
||||
Time: ts,
|
||||
@ -923,13 +922,13 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) {
|
||||
Tid: taskRow,
|
||||
Cname: pickTaskColor(task.id),
|
||||
}
|
||||
targ := taskArg{ID: task.id}
|
||||
targ := TaskArg{ID: task.id}
|
||||
if task.create != nil {
|
||||
sl.Stack = ctx.stack(ctx.res.Stacks[task.create.StkID])
|
||||
sl.Stack = ctx.stack(task.create.Stk)
|
||||
targ.StartG = task.create.G
|
||||
}
|
||||
if task.end != nil {
|
||||
sl.EndStack = ctx.stack(ctx.res.Stacks[task.end.StkID])
|
||||
sl.EndStack = ctx.stack(task.end.Stk)
|
||||
targ.EndG = task.end.G
|
||||
}
|
||||
sl.Arg = targ
|
||||
@ -937,8 +936,8 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) {
|
||||
|
||||
if task.create != nil && task.create.Type == trace.EvUserTaskCreate && task.create.Args[1] != 0 {
|
||||
ctx.arrowSeq++
|
||||
ctx.emit(&viewerEvent{Name: "newTask", Phase: "s", Tid: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
|
||||
ctx.emit(&viewerEvent{Name: "newTask", Phase: "t", Tid: taskRow, ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
|
||||
ctx.emit(&ViewerEvent{Name: "newTask", Phase: "s", Tid: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
|
||||
ctx.emit(&ViewerEvent{Name: "newTask", Phase: "t", Tid: taskRow, ID: ctx.arrowSeq, Time: ts, Pid: tasksSection})
|
||||
}
|
||||
}
|
||||
|
||||
@ -959,7 +958,7 @@ func (ctx *traceContext) emitRegion(s regionDesc) {
|
||||
scopeID := fmt.Sprintf("%x", id)
|
||||
name := s.Name
|
||||
|
||||
sl0 := &viewerEvent{
|
||||
sl0 := &ViewerEvent{
|
||||
Category: "Region",
|
||||
Name: name,
|
||||
Phase: "b",
|
||||
@ -970,11 +969,11 @@ func (ctx *traceContext) emitRegion(s regionDesc) {
|
||||
Cname: pickTaskColor(s.TaskID),
|
||||
}
|
||||
if s.Start != nil {
|
||||
sl0.Stack = ctx.stack(ctx.res.Stacks[s.Start.StkID])
|
||||
sl0.Stack = ctx.stack(s.Start.Stk)
|
||||
}
|
||||
ctx.emit(sl0)
|
||||
|
||||
sl1 := &viewerEvent{
|
||||
sl1 := &ViewerEvent{
|
||||
Category: "Region",
|
||||
Name: name,
|
||||
Phase: "e",
|
||||
@ -983,10 +982,10 @@ func (ctx *traceContext) emitRegion(s regionDesc) {
|
||||
ID: uint64(regionID),
|
||||
Scope: scopeID,
|
||||
Cname: pickTaskColor(s.TaskID),
|
||||
Arg: regionArg{TaskID: s.TaskID},
|
||||
Arg: RegionArg{TaskID: s.TaskID},
|
||||
}
|
||||
if s.End != nil {
|
||||
sl1.Stack = ctx.stack(ctx.res.Stacks[s.End.StkID])
|
||||
sl1.Stack = ctx.stack(s.End.Stk)
|
||||
}
|
||||
ctx.emit(sl1)
|
||||
}
|
||||
@ -1005,7 +1004,7 @@ func (ctx *traceContext) emitHeapCounters(ev *trace.Event) {
|
||||
diff = ctx.heapStats.nextGC - ctx.heapStats.heapAlloc
|
||||
}
|
||||
if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
|
||||
ctx.emit(&viewerEvent{Name: "Heap", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}})
|
||||
ctx.emit(&ViewerEvent{Name: "Heap", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}})
|
||||
}
|
||||
ctx.prevHeapStats = ctx.heapStats
|
||||
}
|
||||
@ -1021,7 +1020,7 @@ func (ctx *traceContext) emitGoroutineCounters(ev *trace.Event) {
|
||||
return
|
||||
}
|
||||
if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
|
||||
ctx.emit(&viewerEvent{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &goroutineCountersArg{uint64(ctx.gstates[gRunning]), uint64(ctx.gstates[gRunnable]), uint64(ctx.gstates[gWaitingGC])}})
|
||||
ctx.emit(&ViewerEvent{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &goroutineCountersArg{uint64(ctx.gstates[gRunning]), uint64(ctx.gstates[gRunnable]), uint64(ctx.gstates[gWaitingGC])}})
|
||||
}
|
||||
ctx.prevGstates = ctx.gstates
|
||||
}
|
||||
@ -1036,7 +1035,7 @@ func (ctx *traceContext) emitThreadCounters(ev *trace.Event) {
|
||||
return
|
||||
}
|
||||
if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) {
|
||||
ctx.emit(&viewerEvent{Name: "Threads", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &threadCountersArg{
|
||||
ctx.emit(&ViewerEvent{Name: "Threads", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &threadCountersArg{
|
||||
Running: ctx.threadStats.prunning,
|
||||
InSyscall: ctx.threadStats.insyscall}})
|
||||
}
|
||||
@ -1074,14 +1073,14 @@ func (ctx *traceContext) emitInstant(ev *trace.Event, name, category string) {
|
||||
}
|
||||
arg = &Arg{ev.Args[0]}
|
||||
}
|
||||
ctx.emit(&viewerEvent{
|
||||
ctx.emit(&ViewerEvent{
|
||||
Name: name,
|
||||
Category: category,
|
||||
Phase: "I",
|
||||
Scope: "t",
|
||||
Time: ctx.time(ev),
|
||||
Tid: ctx.proc(ev),
|
||||
Stack: ctx.stack(ctx.res.Stacks[ev.StkID]),
|
||||
Stack: ctx.stack(ev.Stk),
|
||||
Cname: cname,
|
||||
Arg: arg})
|
||||
}
|
||||
@ -1118,11 +1117,8 @@ func (ctx *traceContext) emitArrow(ev *trace.Event, name string) {
|
||||
}
|
||||
|
||||
ctx.arrowSeq++
|
||||
ctx.emit(&viewerEvent{Name: name, Phase: "s", Tid: ctx.proc(ev),
|
||||
ID: ctx.arrowSeq, Time: ctx.time(ev),
|
||||
Stack: ctx.stack(ctx.res.Stacks[ev.StkID]), Cname: color})
|
||||
ctx.emit(&viewerEvent{Name: name, Phase: "t", Tid: ctx.proc(ev.Link),
|
||||
ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color})
|
||||
ctx.emit(&ViewerEvent{Name: name, Phase: "s", Tid: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.stack(ev.Stk), Cname: color})
|
||||
ctx.emit(&ViewerEvent{Name: name, Phase: "t", Tid: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color})
|
||||
}
|
||||
|
||||
func (ctx *traceContext) stack(stk []*trace.Frame) int {
|
||||
@ -1144,7 +1140,7 @@ func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
|
||||
node.id = ctx.frameSeq
|
||||
node.children = make(map[uint64]frameNode)
|
||||
parent.children[frame.PC] = node
|
||||
ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), viewerFrame{fmt.Sprintf("%v:%v", frame.Fn, frame.Line), parent.id})
|
||||
ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), ViewerFrame{fmt.Sprintf("%v:%v", frame.Fn, frame.Line), parent.id})
|
||||
}
|
||||
return ctx.buildBranch(node, stk)
|
||||
}
|
||||
@ -1179,7 +1175,7 @@ type jsonWriter struct {
|
||||
}
|
||||
|
||||
func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
|
||||
frames := make(map[string]viewerFrame)
|
||||
frames := make(map[string]ViewerFrame)
|
||||
enc := json.NewEncoder(w)
|
||||
written := 0
|
||||
index := int64(-1)
|
||||
@ -1191,7 +1187,7 @@ func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
|
||||
enc.Encode(unit)
|
||||
io.WriteString(w, ",")
|
||||
},
|
||||
consumeViewerEvent: func(v *viewerEvent, required bool) {
|
||||
consumeViewerEvent: func(v *ViewerEvent, required bool) {
|
||||
index++
|
||||
if !required && (index < start || index > end) {
|
||||
// not in the range. Skip!
|
||||
@ -1208,7 +1204,7 @@ func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer {
|
||||
// Same should be applied to splittingTraceConsumer.
|
||||
written++
|
||||
},
|
||||
consumeViewerFrame: func(k string, v viewerFrame) {
|
||||
consumeViewerFrame: func(k string, v ViewerFrame) {
|
||||
frames[k] = v
|
||||
},
|
||||
flush: func() {
|
||||
|
@ -8,27 +8,26 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"internal/trace"
|
||||
"io/ioutil"
|
||||
rtrace "runtime/trace"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
trace "internal/traceparser"
|
||||
)
|
||||
|
||||
// stacks is a fake stack map populated for test.
|
||||
type stacks map[uint32][]*trace.Frame
|
||||
type stacks map[uint64][]*trace.Frame
|
||||
|
||||
// add adds a stack with a single frame whose Fn field is
|
||||
// set to the provided fname and returns a unique stack id.
|
||||
func (s *stacks) add(fname string) uint64 {
|
||||
if *s == nil {
|
||||
*s = make(map[uint32][]*trace.Frame)
|
||||
*s = make(map[uint64][]*trace.Frame)
|
||||
}
|
||||
|
||||
id := uint32(len(*s))
|
||||
id := uint64(len(*s))
|
||||
(*s)[id] = []*trace.Frame{{Fn: fname}}
|
||||
return uint64(id)
|
||||
return id
|
||||
}
|
||||
|
||||
// TestGoroutineCount tests runnable/running goroutine counts computed by generateTrace
|
||||
@ -37,7 +36,8 @@ func (s *stacks) add(fname string) uint64 {
|
||||
// - the counts must not include goroutines blocked waiting on channels or in syscall.
|
||||
func TestGoroutineCount(t *testing.T) {
|
||||
w := trace.NewWriter()
|
||||
w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
|
||||
w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
|
||||
w.Emit(trace.EvFrequency, 1) // [ticks per second]
|
||||
|
||||
var s stacks
|
||||
|
||||
@ -61,9 +61,8 @@ func TestGoroutineCount(t *testing.T) {
|
||||
w.Emit(trace.EvGoCreate, 1, 40, s.add("pkg.f4"), s.add("main.f4"))
|
||||
w.Emit(trace.EvGoStartLocal, 1, 40) // [timestamp, goroutine id]
|
||||
w.Emit(trace.EvGoSched, 1, s.add("main.f4")) // [timestamp, stack]
|
||||
w.Emit(trace.EvFrequency, 1) // [ticks per second]
|
||||
|
||||
res, err := trace.ParseBuffer(w)
|
||||
res, err := trace.Parse(w, "")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse test trace: %v", err)
|
||||
}
|
||||
@ -75,9 +74,9 @@ func TestGoroutineCount(t *testing.T) {
|
||||
}
|
||||
|
||||
// Use the default viewerDataTraceConsumer but replace
|
||||
// consumeViewerEvent to intercept the viewerEvents for testing.
|
||||
// consumeViewerEvent to intercept the ViewerEvents for testing.
|
||||
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
|
||||
c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
|
||||
c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
|
||||
if ev.Name == "Goroutines" {
|
||||
cnt := ev.Arg.(*goroutineCountersArg)
|
||||
if cnt.Runnable+cnt.Running > 2 {
|
||||
@ -88,7 +87,7 @@ func TestGoroutineCount(t *testing.T) {
|
||||
}
|
||||
|
||||
// If the counts drop below 0, generateTrace will return an error.
|
||||
if err := generateTrace(res, params, c); err != nil {
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
t.Fatalf("generateTrace failed: %v", err)
|
||||
}
|
||||
}
|
||||
@ -100,7 +99,8 @@ func TestGoroutineFilter(t *testing.T) {
|
||||
var s stacks
|
||||
|
||||
w := trace.NewWriter()
|
||||
w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
|
||||
w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
|
||||
w.Emit(trace.EvFrequency, 1) // [ticks per second]
|
||||
|
||||
// goroutine 10: blocked
|
||||
w.Emit(trace.EvGoCreate, 1, 10, s.add("pkg.f1"), s.add("main.f1")) // [timestamp, new goroutine id, new stack id, stack id]
|
||||
@ -115,9 +115,8 @@ func TestGoroutineFilter(t *testing.T) {
|
||||
// goroutine 10: runnable->running->block
|
||||
w.Emit(trace.EvGoStartLocal, 1, 10) // [timestamp, goroutine id]
|
||||
w.Emit(trace.EvGoBlock, 1, s.add("pkg.f3")) // [timestamp, stack]
|
||||
w.Emit(trace.EvFrequency, 1) // [ticks per second]
|
||||
|
||||
res, err := trace.ParseBuffer(w)
|
||||
res, err := trace.Parse(w, "")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse test trace: %v", err)
|
||||
}
|
||||
@ -130,14 +129,15 @@ func TestGoroutineFilter(t *testing.T) {
|
||||
}
|
||||
|
||||
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
|
||||
if err := generateTrace(res, params, c); err != nil {
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
t.Fatalf("generateTrace failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreemptedMarkAssist(t *testing.T) {
|
||||
w := trace.NewWriter()
|
||||
w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
|
||||
w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp]
|
||||
w.Emit(trace.EvFrequency, 1) // [ticks per second]
|
||||
|
||||
var s stacks
|
||||
// goroutine 9999: running -> mark assisting -> preempted -> assisting -> running -> block
|
||||
@ -148,13 +148,11 @@ func TestPreemptedMarkAssist(t *testing.T) {
|
||||
w.Emit(trace.EvGoStartLocal, 1, 9999) // [timestamp, goroutine id]
|
||||
w.Emit(trace.EvGCMarkAssistDone, 1) // [timestamp]
|
||||
w.Emit(trace.EvGoBlock, 1, s.add("main.f2")) // [timestamp, stack]
|
||||
w.Emit(trace.EvFrequency, 1) // [ticks per second]
|
||||
|
||||
res, err := trace.ParseBuffer(w)
|
||||
res, err := trace.Parse(w, "")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse test trace: %v", err)
|
||||
}
|
||||
t.Logf("%+v", *res)
|
||||
res.Stacks = s // use fake stacks
|
||||
|
||||
params := &traceParams{
|
||||
@ -165,12 +163,12 @@ func TestPreemptedMarkAssist(t *testing.T) {
|
||||
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
|
||||
|
||||
marks := 0
|
||||
c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
|
||||
c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
|
||||
if strings.Contains(ev.Name, "MARK ASSIST") {
|
||||
marks++
|
||||
}
|
||||
}
|
||||
if err := generateTrace(res, params, c); err != nil {
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
t.Fatalf("generateTrace failed: %v", err)
|
||||
}
|
||||
|
||||
@ -216,7 +214,7 @@ func TestFoo(t *testing.T) {
|
||||
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
|
||||
|
||||
var logBeforeTaskEnd, logAfterTaskEnd bool
|
||||
c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
|
||||
c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
|
||||
if ev.Name == "log before task ends" {
|
||||
logBeforeTaskEnd = true
|
||||
}
|
||||
@ -224,7 +222,7 @@ func TestFoo(t *testing.T) {
|
||||
logAfterTaskEnd = true
|
||||
}
|
||||
}
|
||||
if err := generateTrace(res, params, c); err != nil {
|
||||
if err := generateTrace(params, c); err != nil {
|
||||
t.Fatalf("generateTrace failed: %v", err)
|
||||
}
|
||||
if !logBeforeTaskEnd {
|
||||
|
@ -8,7 +8,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"internal/traceparser"
|
||||
traceparser "internal/trace"
|
||||
"io/ioutil"
|
||||
"runtime"
|
||||
"runtime/trace"
|
||||
@ -73,15 +73,17 @@ func TestGoroutineInSyscall(t *testing.T) {
|
||||
}
|
||||
trace.Stop()
|
||||
|
||||
res, err := traceparser.ParseBuffer(buf)
|
||||
if err != nil {
|
||||
res, err := traceparser.Parse(buf, "")
|
||||
if err == traceparser.ErrTimeOrder {
|
||||
t.Skipf("skipping due to golang.org/issue/16755 (timestamps are unreliable): %v", err)
|
||||
} else if err != nil {
|
||||
t.Fatalf("failed to parse trace: %v", err)
|
||||
}
|
||||
|
||||
// Check only one thread for the pipe read goroutine is
|
||||
// considered in-syscall.
|
||||
c := viewerDataTraceConsumer(ioutil.Discard, 0, 1<<63-1)
|
||||
c.consumeViewerEvent = func(ev *viewerEvent, _ bool) {
|
||||
c.consumeViewerEvent = func(ev *ViewerEvent, _ bool) {
|
||||
if ev.Name == "Threads" {
|
||||
arg := ev.Arg.(*threadCountersArg)
|
||||
if arg.InSyscall > 1 {
|
||||
@ -94,7 +96,7 @@ func TestGoroutineInSyscall(t *testing.T) {
|
||||
parsed: res,
|
||||
endTime: int64(1<<63 - 1),
|
||||
}
|
||||
if err := generateTrace(res, param, c); err != nil {
|
||||
if err := generateTrace(param, c); err != nil {
|
||||
t.Fatalf("failed to generate ViewerData: %v", err)
|
||||
}
|
||||
}
|
||||
|
@@ -272,9 +272,7 @@ var pkgDeps = map[string][]string{
 	"index/suffixarray":     {"L4", "regexp"},
 	"internal/goroot":       {"L4", "OS"},
 	"internal/singleflight": {"sync"},
-	"internal/trace":        {"L4", "OS"},
-	"internal/traceparser":  {"L4", "internal/traceparser/filebuf", "container/heap"},
-	"internal/traceparser/filebuf": {"L4", "OS"},
+	"internal/trace":        {"L4", "OS", "container/heap"},
 	"math/big":              {"L4"},
 	"mime":                  {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
 	"mime/quotedprintable":  {"L4"},
|
||||
|
@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
package trace
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
@ -50,8 +50,7 @@ const (
|
||||
//
|
||||
// If the UtilPerProc flag is not given, this always returns a single
|
||||
// utilization function. Otherwise, it returns one function per P.
|
||||
func (p *Parsed) MutatorUtilization(flags UtilFlags) [][]MutatorUtil {
|
||||
events := p.Events
|
||||
func MutatorUtilization(events []*Event, flags UtilFlags) [][]MutatorUtil {
|
||||
if len(events) == 0 {
|
||||
return nil
|
||||
}
|
||||
@ -474,6 +473,7 @@ func (acc *accumulator) addMU(time int64, mu float64, window time.Duration) bool
|
||||
}
|
||||
keep:
|
||||
}
|
||||
|
||||
if len(acc.wHeap) < acc.nWorst {
|
||||
// We don't have N windows yet, so keep accumulating.
|
||||
acc.bound = 1.0
|
@ -2,11 +2,12 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
package trace
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@ -76,20 +77,18 @@ func TestMMU(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMMUTrace(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
|
||||
t.Skipf("files from outside the package are not available on %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
// Can't be t.Parallel() because it modifies the
|
||||
// testingOneBand package variable.
|
||||
|
||||
p, err := New("../trace/testdata/stress_1_10_good")
|
||||
data, err := ioutil.ReadFile("testdata/stress_1_10_good")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read input file: %v", err)
|
||||
}
|
||||
if err := p.Parse(0, 1<<62, nil); err != nil {
|
||||
_, events, err := parse(bytes.NewReader(data), "")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse trace: %s", err)
|
||||
}
|
||||
mu := p.MutatorUtilization(UtilSTW | UtilBackground | UtilAssist)
|
||||
mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist)
|
||||
mmuCurve := NewMMUCurve(mu)
|
||||
|
||||
// Test the optimized implementation against the "obviously
|
||||
@ -123,14 +122,15 @@ func TestMMUTrace(t *testing.T) {
|
||||
}
|
||||
|
||||
func BenchmarkMMU(b *testing.B) {
|
||||
p, err := New("../trace/testdata/stress_1_10_good")
|
||||
data, err := ioutil.ReadFile("testdata/stress_1_10_good")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to read input file: %v", err)
|
||||
}
|
||||
if err := p.Parse(0, 1<<62, nil); err != nil {
|
||||
_, events, err := parse(bytes.NewReader(data), "")
|
||||
if err != nil {
|
||||
b.Fatalf("failed to parse trace: %s", err)
|
||||
}
|
||||
mu := p.MutatorUtilization(UtilSTW | UtilBackground | UtilAssist | UtilSweep)
|
||||
mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist|UtilSweep)
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package traceparser
+package trace
 
 import (
 	"math"
|
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package traceparser
+package trace
 
 import (
 	"math/rand"
|
@ -1,313 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
// postProcess is a final check of consistency, and if all is well,
|
||||
// adds links to Events
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type gStatus int
|
||||
|
||||
const (
|
||||
gDead gStatus = iota
|
||||
gRunnable
|
||||
gRunning
|
||||
gWaiting
|
||||
)
|
||||
|
||||
// This code is copied from internal/trace/parser.go. With greater understanding it could be
|
||||
// simplified. Sets ev.P for GCStart, and set various Link fields
|
||||
func (p *Parsed) postProcess(events []*Event) error {
|
||||
type gdesc struct {
|
||||
state gStatus
|
||||
ev *Event
|
||||
evStart *Event
|
||||
evCreate *Event
|
||||
evMarkAssist *Event
|
||||
}
|
||||
type pdesc struct {
|
||||
running bool
|
||||
g uint64
|
||||
evSTW *Event
|
||||
evSweep *Event
|
||||
}
|
||||
|
||||
gs := make(map[uint64]gdesc)
|
||||
ps := make(map[int]pdesc)
|
||||
tasks := make(map[uint64]*Event) // task id to task creation events
|
||||
activeRegions := make(map[uint64][]*Event) // goroutine id to stack of spans
|
||||
gs[0] = gdesc{state: gRunning}
|
||||
var evGC, evSTW *Event
|
||||
|
||||
checkRunning := func(pd pdesc, g gdesc, ev *Event, allowG0 bool) error {
|
||||
if g.state != gRunning {
|
||||
return fmt.Errorf("saw %v, but g %d is not running", ev, ev.G)
|
||||
}
|
||||
if pd.g != ev.G {
|
||||
return fmt.Errorf("saw %v, but it's P is running %d, not %d", ev, pd.g, ev.G)
|
||||
}
|
||||
if !allowG0 && ev.G == 0 {
|
||||
return fmt.Errorf("saw %v with unexpected g==0", ev)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for i, ev := range events {
|
||||
g := gs[ev.G]
|
||||
px := ps[int(ev.P)]
|
||||
switch ev.Type {
|
||||
case EvProcStart:
|
||||
if px.running {
|
||||
return fmt.Errorf("%d: running before start %s", i, ev)
|
||||
}
|
||||
px.running = true
|
||||
case EvProcStop:
|
||||
if !px.running {
|
||||
return fmt.Errorf("%d: p %d not running %s", i, ev.P, ev)
|
||||
}
|
||||
if px.g != 0 {
|
||||
return fmt.Errorf("p %d is running a goroutine %s", ev.P, ev)
|
||||
}
|
||||
px.running = false
|
||||
case EvGCStart:
|
||||
if evGC != nil {
|
||||
return fmt.Errorf("GC already running %s, was %s", ev, evGC)
|
||||
}
|
||||
evGC = ev
|
||||
// Attribute this to the global GC state.
|
||||
ev.P = GCP
|
||||
case EvGCDone:
|
||||
if evGC == nil {
|
||||
return fmt.Errorf("%d:%s bogus GC end", i, ev)
|
||||
}
|
||||
evGC.Link = ev
|
||||
evGC = nil
|
||||
case EvGCSTWStart:
|
||||
evp := &evSTW
|
||||
if p.Version < 1010 {
|
||||
// Before 1.10, EvGCSTWStart was per-P.
|
||||
evp = &px.evSTW
|
||||
}
|
||||
if *evp != nil {
|
||||
return fmt.Errorf("STW %s still running at %s", *evp, ev)
|
||||
}
|
||||
*evp = ev
|
||||
case EvGCSTWDone:
|
||||
evp := &evSTW
|
||||
if p.Version < 1010 {
|
||||
// Before 1.10, EvGCSTWDone was per-P.
|
||||
evp = &px.evSTW
|
||||
}
|
||||
if *evp == nil {
|
||||
return fmt.Errorf("%d: no STW running %s", i, ev)
|
||||
}
|
||||
(*evp).Link = ev
|
||||
*evp = nil
|
||||
case EvGCMarkAssistStart:
|
||||
if g.evMarkAssist != nil {
|
||||
return fmt.Errorf("%d: MarkAssist %s is still running at %s",
|
||||
i, g.evMarkAssist, ev)
|
||||
}
|
||||
g.evMarkAssist = ev
|
||||
case EvGCMarkAssistDone:
|
||||
// Unlike most events, mark assists can be in progress when a
|
||||
// goroutine starts tracing, so we can't report an error here.
|
||||
if g.evMarkAssist != nil {
|
||||
g.evMarkAssist.Link = ev
|
||||
g.evMarkAssist = nil
|
||||
}
|
||||
case EvGCSweepStart:
|
||||
if px.evSweep != nil {
|
||||
return fmt.Errorf("sweep not done %d: %s", i, ev)
|
||||
}
|
||||
px.evSweep = ev
|
||||
case EvGCSweepDone:
|
||||
if px.evSweep == nil {
|
||||
return fmt.Errorf("%d: no sweep happening %s", i, ev)
|
||||
}
|
||||
px.evSweep.Link = ev
|
||||
px.evSweep = nil
|
||||
case EvGoWaiting:
|
||||
if g.state != gRunnable {
|
||||
return fmt.Errorf("not runnable before %d:%s", i, ev)
|
||||
}
|
||||
g.state = gWaiting
|
||||
g.ev = ev
|
||||
case EvGoInSyscall:
|
||||
if g.state != gRunnable {
|
||||
return fmt.Errorf("not runnable before %d:%s", i, ev)
|
||||
}
|
||||
g.state = gWaiting
|
||||
g.ev = ev
|
||||
case EvGoCreate:
|
||||
if err := checkRunning(px, g, ev, true); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := gs[ev.Args[0]]; ok {
|
||||
return fmt.Errorf("%d: already exists %s", i, ev)
|
||||
}
|
||||
gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
|
||||
case EvGoStart, EvGoStartLabel:
|
||||
if g.state != gRunnable {
|
||||
return fmt.Errorf("not runnable before start %d:%s %+v", i, ev, g)
|
||||
}
|
||||
if px.g != 0 {
|
||||
return fmt.Errorf("%d: %s has p running %d already %v", i, ev, px.g, px)
|
||||
}
|
||||
g.state = gRunning
|
||||
g.evStart = ev
|
||||
px.g = ev.G
|
||||
if g.evCreate != nil {
|
||||
if p.Version < 1007 {
|
||||
// +1 because symbolizer expects return pc.
|
||||
//PJW: aren't doing < 1007. ev.stk = []*Frame{{PC: g.evCreate.args[1] + 1}}
|
||||
} else {
|
||||
ev.StkID = uint32(g.evCreate.Args[1])
|
||||
}
|
||||
g.evCreate = nil
|
||||
}
|
||||
|
||||
if g.ev != nil {
|
||||
g.ev.Link = ev
|
||||
g.ev = nil
|
||||
}
|
||||
case EvGoEnd, EvGoStop:
|
||||
if err := checkRunning(px, g, ev, false); err != nil {
|
||||
return fmt.Errorf("%d: %s", i, err)
|
||||
}
|
||||
g.evStart.Link = ev
|
||||
g.evStart = nil
|
||||
g.state = gDead
|
||||
px.g = 0
|
||||
|
||||
if ev.Type == EvGoEnd { // flush all active Regions
|
||||
spans := activeRegions[ev.G]
|
||||
for _, s := range spans {
|
||||
s.Link = ev
|
||||
}
|
||||
delete(activeRegions, ev.G)
|
||||
}
|
||||
case EvGoSched, EvGoPreempt:
|
||||
if err := checkRunning(px, g, ev, false); err != nil {
|
||||
return err
|
||||
}
|
||||
g.state = gRunnable
|
||||
g.evStart.Link = ev
|
||||
g.evStart = nil
|
||||
px.g = 0
|
||||
g.ev = ev
|
||||
case EvGoUnblock:
|
||||
if g.state != gRunning {
return fmt.Errorf("%d: %s: unblocking goroutine %d is not running (state %s)", i, ev, ev.G, g.state)
|
||||
|
||||
}
|
||||
if ev.P != TimerP && px.g != ev.G {
|
||||
// PJW: do better here.
|
||||
return fmt.Errorf("%d: %s p %d is not running g", i, ev, px.g)
|
||||
}
|
||||
g1 := gs[ev.Args[0]]
|
||||
if g1.state != gWaiting {
|
||||
return fmt.Errorf("g %v is not waiting before unpark i=%d g1=%v %s",
|
||||
ev.Args[0], i, g1, ev)
|
||||
}
|
||||
if g1.ev != nil && g1.ev.Type == EvGoBlockNet && ev.P != TimerP {
|
||||
ev.P = NetpollP
|
||||
}
|
||||
if g1.ev != nil {
|
||||
g1.ev.Link = ev
|
||||
}
|
||||
g1.state = gRunnable
|
||||
g1.ev = ev
|
||||
gs[ev.Args[0]] = g1
|
||||
case EvGoSysCall:
|
||||
if err := checkRunning(px, g, ev, false); err != nil {
|
||||
return err
|
||||
}
|
||||
g.ev = ev
|
||||
case EvGoSysBlock:
|
||||
if err := checkRunning(px, g, ev, false); err != nil {
|
||||
return err
|
||||
}
|
||||
g.state = gWaiting
|
||||
g.evStart.Link = ev
|
||||
g.evStart = nil
|
||||
px.g = 0
|
||||
case EvGoSysExit:
|
||||
if g.state != gWaiting {
|
||||
return fmt.Errorf("not waiting when %s", ev)
|
||||
}
|
||||
if g.ev != nil && g.ev.Type == EvGoSysCall {
|
||||
g.ev.Link = ev
|
||||
}
|
||||
g.state = gRunnable
|
||||
g.ev = ev
|
||||
case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
|
||||
EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoBlockGC:
|
||||
if err := checkRunning(px, g, ev, false); err != nil {
|
||||
return err
|
||||
}
|
||||
g.state = gWaiting
|
||||
g.ev = ev
|
||||
g.evStart.Link = ev
|
||||
g.evStart = nil
|
||||
px.g = 0
|
||||
case EvUserTaskCreate:
|
||||
taskid := ev.Args[0]
|
||||
if prevEv, ok := tasks[taskid]; ok {
|
||||
return fmt.Errorf("task id conflicts (id:%d), %q vs %q", taskid, ev, prevEv)
|
||||
}
|
||||
tasks[ev.Args[0]] = ev
|
||||
case EvUserTaskEnd:
|
||||
if prevEv, ok := tasks[ev.Args[0]]; ok {
|
||||
prevEv.Link = ev
|
||||
ev.Link = prevEv
|
||||
}
|
||||
case EvUserRegion:
|
||||
mode := ev.Args[1]
|
||||
spans := activeRegions[ev.G]
|
||||
if mode == 0 { // span start
|
||||
activeRegions[ev.G] = append(spans, ev) // push
|
||||
} else if mode == 1 { // span end
|
||||
n := len(spans)
|
||||
if n > 0 { // matching span start event is in the trace.
|
||||
s := spans[n-1]
|
||||
if s.Args[0] != ev.Args[0] || s.SArgs[0] != ev.SArgs[0] { // task id, span name mismatch
|
||||
return fmt.Errorf("misuse of span in goroutine %d: span end %q when the inner-most active span start event is %q",
|
||||
ev.G, ev, s)
|
||||
}
|
||||
// Link span start event with span end event
|
||||
s.Link = ev
|
||||
ev.Link = s
|
||||
|
||||
if n > 1 {
|
||||
activeRegions[ev.G] = spans[:n-1]
|
||||
} else {
|
||||
delete(activeRegions, ev.G)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("invalid user region, mode: %q", ev)
|
||||
}
|
||||
}
|
||||
gs[ev.G] = g
|
||||
ps[int(ev.P)] = px
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (g gStatus) String() string {
|
||||
switch g {
|
||||
case gDead:
|
||||
return "gDead"
|
||||
case gRunnable:
|
||||
return "gRunnable"
|
||||
case gRunning:
|
||||
return "gRunning"
|
||||
case gWaiting:
|
||||
return "gWaiting"
|
||||
}
|
||||
return fmt.Sprintf("gStatus?%d", g)
|
||||
}
|
@ -1,312 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// convert raw events into Events
|
||||
|
||||
func (p *Parsed) createEvents(f func(string)) error {
|
||||
// multiple passes:
|
||||
// create some Events
|
||||
// sort them by time (and adjust their times to be nanoseconds)
|
||||
// remove events not in the desired time interval
|
||||
// make the events consistent (adding initializing events at the beginning)
|
||||
// remove the futile events
|
||||
// add links (and do final checking)
|
||||
|
||||
// shared by firstEvents
|
||||
p.byproc = make(map[int][]*Event)
|
||||
p.lastGs = make(map[int]uint64)
|
||||
|
||||
// p.batches are always sorted by time. otherwise a batch for one p that is totally
|
||||
// later than another batch might be done first, confusing us about g's
|
||||
for i, b := range p.batches {
|
||||
if b.raws == nil {
|
||||
continue
|
||||
}
|
||||
if err := p.firstEvents(b); err != nil {
|
||||
return fmt.Errorf("%v", err) // PJW: this is not useful
|
||||
}
|
||||
// we are done with b.raws now
|
||||
p.batches[i].raws = nil
|
||||
}
|
||||
f("firstEvents finished")
|
||||
sorted := []*Event{}
|
||||
for _, v := range p.byproc {
|
||||
sorted = append(sorted, v...)
|
||||
}
|
||||
// PJW: are we done with p.byproc now? Yes. This shrinks a little.
|
||||
p.byproc = nil
|
||||
// Why wasn't this done earlier? Or, why do it at all?
|
||||
for _, ev := range sorted {
|
||||
switch ev.Type {
|
||||
case EvGoStartLocal:
|
||||
ev.Type = EvGoStart
|
||||
case EvGoUnblockLocal:
|
||||
ev.Type = EvGoUnblock
|
||||
case EvGoSysExitLocal:
|
||||
ev.Type = EvGoSysExit
|
||||
}
|
||||
}
|
||||
// change to nanoseconds
|
||||
freq := 1e9 / float64(p.TicksPerSec)
|
||||
for i, ev := range sorted {
|
||||
// Move timers and syscalls to separate fake Ps.
|
||||
// This could be done in the loop at line 38
|
||||
// or maybe after robust fixes things.
|
||||
if p.timerGoids[ev.G] && ev.Type == EvGoUnblock {
|
||||
ev.Args[2] = uint64(ev.P) // save for robust() to use
|
||||
ev.P = TimerP
|
||||
}
|
||||
// sometimes the ts is not what it should be
|
||||
if ev.Type == EvGoSysExit {
|
||||
ev.P = SyscallP
|
||||
if ev.Args[2] != 0 {
|
||||
// PJW: test for this being safe. There might be no preceding
|
||||
// EvSysBlock, EvGoInSyscall, or its time might be later than this
|
||||
ev.Ts = int64(ev.Args[2])
|
||||
}
|
||||
}
|
||||
if ev.Type == EvGCStart {
|
||||
ev.P = GCP
|
||||
}
|
||||
t := ev.Ts - p.minticks
|
||||
if t < 0 {
|
||||
return fmt.Errorf("event %d %s would be %d mints=%x", i, ev, t, p.minticks)
|
||||
}
|
||||
ev.Ts = int64(float64(ev.Ts-p.minticks) * freq)
|
||||
}
|
||||
// Stable for the case of equal Ts's.
|
||||
sort.SliceStable(sorted, func(i, j int) bool { return sorted[i].Ts < sorted[j].Ts })
|
||||
f("sorted")
|
||||
// and ignore the ones with times out of bounds
|
||||
firstwant, lastwant := 0, len(sorted)
|
||||
for i, ev := range sorted {
|
||||
if ev.Ts < p.MinWant {
|
||||
firstwant = i + 1
|
||||
} else if ev.Ts > p.MaxWant { // closed interval [minwant, maxwant]
|
||||
lastwant = i
|
||||
break // sorted by Ts
|
||||
}
|
||||
}
|
||||
f("nanoseconds")
|
||||
var err error
|
||||
sorted, _, err = p.robust(sorted[firstwant:lastwant]) // PJW: copy info from aux
|
||||
f("consistent")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
events, cnt := p.removeFutile(sorted) // err is always nil here.
|
||||
f(fmt.Sprintf("removed %d futiles", cnt))
|
||||
// and finally, do some checks and put in links
|
||||
err = p.postProcess(events)
|
||||
f("post processed")
|
||||
if err != nil {
|
||||
return err // PJW: is this enough? NO
|
||||
}
|
||||
p.Events = events
|
||||
return nil
|
||||
}
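As a rough worked example of the tick-to-nanosecond conversion that createEvents applies above (the frequency and tick values here are invented for illustration, not taken from any real trace):

package main

import "fmt"

func main() {
	// Mirrors the conversion in createEvents: EvFrequency gives ticks per
	// second, and every timestamp is rebased against the smallest tick value
	// seen in the trace before being scaled to nanoseconds.
	ticksPerSec := int64(2500000000) // hypothetical EvFrequency value (2.5 GHz)
	minticks := int64(1000000)       // hypothetical earliest timestamp, in ticks
	freq := 1e9 / float64(ticksPerSec)

	raw := int64(1250000) // a raw event timestamp, in ticks
	ns := int64(float64(raw-minticks) * freq)
	fmt.Println(ns) // 100000: the event is 100µs into the trace
}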
// Special P identifiers.
|
||||
const (
|
||||
FakeP = 1000000 + iota
|
||||
TimerP // depicts timer unblocks
|
||||
NetpollP // depicts network unblocks
|
||||
SyscallP // depicts returns from syscalls
|
||||
GCP // depicts GC state
|
||||
)
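Since every synthetic processor sits at or above FakeP, a consumer can separate real Ps from the fake ones with a single comparison. A small sketch (isFakeP is illustrative, not part of the package):

// isFakeP reports whether an event was attributed to one of the synthetic
// processors (timer unblocks, netpoll unblocks, syscall returns, GC) rather
// than to a real P from the runtime.
func isFakeP(p int32) bool {
	return p >= FakeP
}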
// convert the raw events for a batch into Events, and keep track of
|
||||
// which G is running on the P that is common to the batch.
|
||||
func (p *Parsed) firstEvents(b batch) error {
|
||||
for _, raw := range b.raws {
|
||||
desc := EventDescriptions[raw.typ]
|
||||
narg := p.rawArgNum(&raw)
|
||||
if p.Err != nil {
|
||||
return p.Err
|
||||
}
|
||||
if raw.typ == EvBatch {
|
||||
// first event, record information about P, G, and Ts
|
||||
p.lastGs[p.lastP] = p.lastG // 0 the first time through
|
||||
p.lastP = int(raw.Arg(0))
|
||||
p.lastG = p.lastGs[p.lastP]
|
||||
p.lastTs = int64(raw.Arg(1))
|
||||
continue
|
||||
}
|
||||
e := &Event{Type: raw.typ, P: int32(p.lastP), G: p.lastG}
|
||||
var argoffset int
|
||||
if p.Version < 1007 { // can't happen.
|
||||
e.Ts = p.lastTs + int64(raw.Arg(1))
|
||||
argoffset = 2
|
||||
} else {
|
||||
e.Ts = p.lastTs + int64(raw.Arg(0))
|
||||
argoffset = 1
|
||||
}
|
||||
p.lastTs = e.Ts
|
||||
// collect the args for the raw event e
|
||||
for i := argoffset; i < narg; i++ {
|
||||
// evade one byte of corruption (from fuzzing typically)
|
||||
if raw.args == nil {
|
||||
return fmt.Errorf("raw.args is nil %s", evname(raw.typ))
|
||||
}
|
||||
if i > 0 && i-1 >= len(*raw.args) {
|
||||
return fmt.Errorf("%s wants arg %d of %d", evname(raw.typ), i, len(*raw.args))
|
||||
}
|
||||
if i == narg-1 && desc.Stack {
|
||||
e.StkID = uint32(raw.Arg(i))
|
||||
} else {
|
||||
e.Args[i-argoffset] = raw.Arg(i)
|
||||
}
|
||||
}
|
||||
switch raw.typ {
|
||||
case EvGoSysCall, EvGCSweepDone, EvGCSweepStart:
|
||||
if e.G == 0 {
|
||||
// missing some earlier G's from this P
|
||||
continue // so we don't know which G is doing it
|
||||
}
|
||||
case EvGoStart, EvGoStartLocal, EvGoStartLabel:
|
||||
p.lastG = e.Args[0]
|
||||
e.G = p.lastG
|
||||
if raw.typ == EvGoStartLabel {
|
||||
e.SArgs = []string{p.Strings[e.Args[2]]}
|
||||
}
|
||||
case EvGCSTWStart:
|
||||
e.G = 0
|
||||
switch e.Args[0] {
|
||||
case 0:
|
||||
e.SArgs = []string{"mark termination"}
|
||||
case 1:
|
||||
e.SArgs = []string{"sweep termination"}
|
||||
default:
|
||||
return fmt.Errorf("unknown STW kind %d!=0,1 %s", e.Args[0], e)
|
||||
}
|
||||
case EvGCStart, EvGCDone, EvGCSTWDone:
|
||||
e.G = 0
|
||||
case EvGoEnd, EvGoStop, EvGoSched, EvGoPreempt,
|
||||
EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
|
||||
EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet,
|
||||
EvGoSysBlock, EvGoBlockGC:
|
||||
p.lastG = 0
|
||||
if e.G == 0 {
|
||||
// missing some earlier G's from this P
|
||||
continue // so we don't know which G is doing it
|
||||
}
|
||||
case EvGoSysExit, EvGoWaiting, EvGoInSyscall:
|
||||
e.G = e.Args[0]
|
||||
case EvUserTaskCreate:
|
||||
// e.Args 0: taskID, 1:parentID, 2:nameID
|
||||
e.SArgs = []string{p.Strings[e.Args[2]]}
|
||||
case EvUserRegion:
|
||||
if e.G == 0 {
|
||||
continue // don't know its G
|
||||
}
|
||||
// e.Args 0: taskID, 1: mode, 2:nameID
|
||||
e.SArgs = []string{p.Strings[e.Args[2]]}
|
||||
case EvUserLog:
|
||||
// e.Args 0: taskID, 1:keyID, 2: stackID
|
||||
e.SArgs = []string{p.Strings[e.Args[1]], raw.sarg}
|
||||
}
|
||||
p.byproc[p.lastP] = append(p.byproc[p.lastP], e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Parsed) removeFutile(events []*Event) ([]*Event, int) {
|
||||
// Phase 1: determine futile wakeup sequences.
|
||||
type G struct {
|
||||
futile bool
|
||||
wakeup []*Event // wakeup sequence (subject for removal)
|
||||
}
|
||||
gs := make(map[uint64]G)
|
||||
futile := make(map[*Event]bool)
|
||||
cnt := 0
|
||||
for _, ev := range events {
|
||||
switch ev.Type {
|
||||
case EvGoUnblock:
|
||||
g := gs[ev.Args[0]]
|
||||
g.wakeup = []*Event{ev}
|
||||
gs[ev.Args[0]] = g
|
||||
case EvGoStart, EvGoPreempt, EvFutileWakeup:
|
||||
g := gs[ev.G]
|
||||
g.wakeup = append(g.wakeup, ev)
|
||||
if ev.Type == EvFutileWakeup {
|
||||
g.futile = true
|
||||
}
|
||||
gs[ev.G] = g
|
||||
case EvGoBlock, EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
|
||||
EvGoBlockSync, EvGoBlockCond:
|
||||
g := gs[ev.G]
|
||||
if g.futile {
|
||||
futile[ev] = true
|
||||
for _, ev1 := range g.wakeup {
|
||||
futile[ev1] = true
|
||||
}
|
||||
}
|
||||
delete(gs, ev.G)
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
// Phase 2: remove futile wakeup sequences.
|
||||
newEvents := events[:0] // overwrite the original slice
|
||||
for _, ev := range events {
|
||||
if !futile[ev] {
|
||||
newEvents = append(newEvents, ev)
|
||||
}
|
||||
}
|
||||
return newEvents, cnt // PJW: cnt doesn't count the futile[]s
|
||||
}
|
||||
|
||||
// Arg gets the n-th arg from a raw event
|
||||
func (r *rawEvent) Arg(n int) uint64 {
|
||||
if n == 0 {
|
||||
return r.arg0
|
||||
}
|
||||
return (*r.args)[n-1]
|
||||
}
|
||||
|
||||
// report the number of arguments. (historical differences)
|
||||
func (p *Parsed) rawArgNum(ev *rawEvent) int {
|
||||
desc := EventDescriptions[ev.typ]
|
||||
switch ev.typ {
|
||||
case EvStack, EvFrequency, EvTimerGoroutine:
|
||||
p.Err = fmt.Errorf("%s unexpected in rawArgNum", evname(ev.typ))
|
||||
return 0
|
||||
}
|
||||
narg := len(desc.Args)
|
||||
if desc.Stack {
|
||||
narg++
|
||||
}
|
||||
if ev.typ == EvBatch {
|
||||
if p.Version < 1007 {
|
||||
narg++ // used to be an extra unused arg
|
||||
}
|
||||
return narg
|
||||
}
|
||||
narg++ // timestamp
|
||||
if p.Version < 1007 {
|
||||
narg++ // sequence
|
||||
}
|
||||
// various special historical cases
|
||||
switch ev.typ {
|
||||
case EvGCSweepDone:
|
||||
if p.Version < 1009 {
|
||||
narg -= 2 // 1.9 added 2 args
|
||||
}
|
||||
case EvGCStart, EvGoStart, EvGoUnblock:
|
||||
if p.Version < 1007 {
|
||||
narg-- // one more since 1.7
|
||||
}
|
||||
case EvGCSTWStart:
|
||||
if p.Version < 1010 {
|
||||
narg-- // 1.10 added an argument
|
||||
}
|
||||
}
|
||||
return narg
|
||||
}
|
@ -1,247 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// scan the trace file finding the header, starts of batches, and the trailer.
|
||||
// the trailer contains strings, stacks, and the clock frequency
|
||||
|
||||
// There are two ways of thinking about the raw trace file. It starts with a 16-byte
// header such as "go 1.11 trace\0\0\0" (the version number varies).
|
||||
// From the point of
|
||||
// view of the runtime, there is a collection of initializations for each goroutine.
|
||||
// These consist of an EvGoCreate, possibly followed by one of EvGoWaiting or
|
||||
// EvGoInSyscall if the goroutine is waiting or in a syscall.
|
||||
// Then there is an EvProcStart for the first running goroutine, so there's a running P,
|
||||
// and then an EvGoStart for the first running goroutine. Then as the program runs, the
|
||||
// runtime emits trace events. Finally when the tracing stops, the runtime emits a footer
|
||||
// consisting of an EvFrequency (to convert ticks to nanoseconds) and some EvTimerGoroutines,
|
||||
// followed by EvStacks for all the stack frames.
|
||||
//
|
||||
// In the file, the header and footer are as described, but all the events in between come
|
||||
// in batches headed by EvBatch with the same P, and have to be rearranged into timestamp order.
|
||||
|
||||
// New() scans once through the file to find the beginnings of all the batches (EvBatch) and
|
||||
// processes the footer extracting the strings and stacks.
|
||||
// Parse() finds the batches that overlap the desired time interval, and processes them into
|
||||
// events, dropping those outside the desired time interval. But it has to derive the missing
|
||||
// initializations from the events it sees, as it has no other access to the state of the runtime.
|
||||
// This is done in robust.go.
|
||||
|
||||
// In more detail, scanFile() is called by commonInit() which is called by either New() or ParseBuffer().
|
||||
// It extracts the strings, the stacks, and remembers the locations of the Batches (all saved in *Parsed).
|
||||
|
||||
// Parse first computes the rawEvents for the batches that overlap the requested interval.
|
||||
// It then calls createEvents() (events.go) which produces Events.
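A minimal sketch of the two-phase use described above, following the same pattern as the package tests; the file name is hypothetical and the import path is assumed (the package lives under the standard library's internal tree, so ordinary programs cannot import it):

package main

import (
	"fmt"
	"log"

	"internal/traceparser" // assumed path; only importable from inside the Go tree
)

func main() {
	// Phase 1: New scans the file once, collecting strings, stacks, and batch locations.
	p, err := traceparser.New("trace.out") // hypothetical trace file
	if err != nil {
		log.Fatal(err)
	}
	// Phase 2: Parse converts the batches overlapping [start, end] (here: everything)
	// into Events; a viewer would normally pass a much narrower window.
	if err := p.Parse(0, 1<<62, nil); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d events, %d ticks/sec\n", len(p.Events), p.TicksPerSec)
}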
func (p *Parsed) parseHeader() error {
|
||||
p.r.Seek(0, 0)
|
||||
var buf [16]byte
|
||||
n, err := p.r.Read(buf[:])
|
||||
if n != 16 || err != nil {
|
||||
return fmt.Errorf("failed to red header: read %d bytes, not 16 %v", n, err)
|
||||
}
|
||||
// by hand. there are only 6 or so legitimate values; we could search for a match
|
||||
if buf[0] != 'g' || buf[1] != 'o' || buf[2] != ' ' ||
|
||||
buf[3] < '1' || buf[3] > '9' ||
|
||||
buf[4] != '.' ||
|
||||
buf[5] < '1' || buf[5] > '9' {
|
||||
return fmt.Errorf("not a trace file")
|
||||
}
|
||||
ver := int(buf[5] - '0')
|
||||
i := 0
|
||||
for ; buf[6+i] >= '0' && buf[6+i] <= '9' && i < 2; i++ {
|
||||
ver = ver*10 + int(buf[6+i]-'0')
|
||||
}
|
||||
ver += int(buf[3]-'0') * 1000
|
||||
if !bytes.Equal(buf[6+i:], []byte(" trace\x00\x00\x00\x00")[:10-i]) {
|
||||
return fmt.Errorf("not a trace file")
|
||||
}
|
||||
p.Version = ver
|
||||
// PJW: reject 1005 and 1007? They need symbolization, which we don't do.
|
||||
// Further, doing these would require 1.7 or earlier binaries.
|
||||
switch ver {
|
||||
case 1005, 1007:
|
||||
break // no longer supported
|
||||
case 1008, 1009:
|
||||
return nil
|
||||
case 1010, 1011:
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%d unsupported version", ver)
|
||||
}
|
||||
|
||||
func (p *Parsed) scanFile() error {
|
||||
r := p.r
|
||||
// fill in the following values for sure
|
||||
strings := make(map[uint64]string)
|
||||
p.Strings = strings // ok to save maps immediately
|
||||
timerGoIDs := make(map[uint64]bool)
|
||||
p.timerGoids = timerGoIDs
|
||||
stacks := make(map[uint32][]*Frame)
|
||||
framer := make(map[Frame]*Frame) // uniqify *Frame
|
||||
p.Stacks = stacks
|
||||
footerLoc := 0
|
||||
|
||||
var buf [1]byte
|
||||
off := 16 // skip the header
|
||||
n, err := r.Seek(int64(off), 0)
|
||||
if err != nil || n != int64(off) {
|
||||
return fmt.Errorf("Seek to %d got %d, err=%v", off, n, err)
|
||||
}
|
||||
var batchts int64 // from preceding batch
|
||||
var lastEv byte
|
||||
for {
|
||||
off0 := off
|
||||
n, err := r.Read(buf[:1])
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil || n != 1 {
|
||||
return fmt.Errorf("read failed at 0x%x, n=%d, %v",
|
||||
off, n, err)
|
||||
}
|
||||
off += n
|
||||
typ := buf[0] << 2 >> 2
|
||||
if typ == EvNone || typ >= EvCount ||
|
||||
EventDescriptions[typ].MinVersion > p.Version {
|
||||
err = fmt.Errorf("unknown event type %v at offset 0x%x, pass 1", typ, off0)
|
||||
return err
|
||||
}
|
||||
// extract and save the strings
|
||||
if typ == EvString {
|
||||
// String dictionary entry [ID, length, string].
|
||||
var id uint64
|
||||
id, off, err = readVal(r, off)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if id == 0 {
|
||||
err = fmt.Errorf("string at offset %d has invalid id 0", off)
|
||||
return err
|
||||
}
|
||||
if strings[id] != "" {
|
||||
err = fmt.Errorf("string at offset %d has duplicate id %v", off, id)
|
||||
return err
|
||||
}
|
||||
var ln uint64
|
||||
ln, off, err = readVal(r, off)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ln == 0 {
|
||||
err = fmt.Errorf("string at offset %d has invalid length 0", off)
|
||||
return err
|
||||
}
|
||||
if ln > 1e6 {
|
||||
err = fmt.Errorf("string at offset %d has too large length %v", off, ln)
|
||||
return err
|
||||
}
|
||||
buf := make([]byte, ln)
|
||||
var n int
|
||||
n, err = io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, ln, err)
|
||||
return err
|
||||
}
|
||||
off += n
|
||||
strings[id] = string(buf)
|
||||
lastEv = EvString
|
||||
continue
|
||||
}
|
||||
p.Count++
|
||||
if typ == EvFrequency {
|
||||
// found footer, remember location, save value
|
||||
footerLoc = off0
|
||||
}
|
||||
var args []uint64
|
||||
off, args, err = p.argsAt(off0, typ)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("argsAt error %v; off=%d off0=%d %s",
|
||||
err, off, off0, evname(typ))
|
||||
return err
|
||||
}
|
||||
r.Seek(int64(off), 0)
|
||||
if typ == EvUserLog {
|
||||
_, off, err = readStr(r, off)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(args) == 0 { // can't happen in well-formed trace file
|
||||
return fmt.Errorf("len(args)==0 off=0x%x typ=%s", off, evname(typ))
|
||||
}
|
||||
switch typ {
|
||||
case EvBatch:
|
||||
if footerLoc == 0 {
|
||||
// EvBatch in footer is just to have a header for stacks
|
||||
locp := int64(args[0])
|
||||
p.batches = append(p.batches,
|
||||
batch{Off: off0, P: locp, Cycles: int64(args[1])})
|
||||
// at this point we know when the previous batch ended!!
|
||||
batchts = int64(args[1])
|
||||
if batchts > p.maxticks {
|
||||
p.maxticks = batchts
|
||||
}
|
||||
}
|
||||
case EvFrequency:
|
||||
p.TicksPerSec = int64(args[0])
|
||||
case EvTimerGoroutine:
|
||||
timerGoIDs[args[0]] = true
|
||||
case EvStack:
|
||||
if len(args) < 2 {
|
||||
return fmt.Errorf("EvStack has too few args %d at 0x%x",
|
||||
len(args), off0)
|
||||
}
|
||||
size := args[1]
|
||||
if size > 1000 {
|
||||
return fmt.Errorf("EvStack has %d frames at 0x%x",
|
||||
size, off0)
|
||||
}
|
||||
want := 2 + 4*size
|
||||
if uint64(len(args)) != want {
|
||||
return fmt.Errorf("EvStack wants %d args, got %d, at 0x%x",
|
||||
len(args), want, off0)
|
||||
}
|
||||
id := args[0]
|
||||
if id != 0 && size > 0 {
|
||||
stk := make([]*Frame, size)
|
||||
for i := 0; i < int(size); i++ {
|
||||
pc := args[2+i*4+0]
|
||||
fn := args[2+i*4+1]
|
||||
file := args[2+i*4+2]
|
||||
line := args[2+i*4+3]
|
||||
stk[i] = &Frame{PC: pc, Fn: strings[fn], File: strings[file], Line: int(line)}
|
||||
if _, ok := framer[*stk[i]]; !ok {
|
||||
framer[*stk[i]] = stk[i]
|
||||
}
|
||||
stk[i] = framer[*stk[i]]
|
||||
}
|
||||
stacks[uint32(id)] = stk
|
||||
}
|
||||
default:
|
||||
if lastEv == EvBatch {
|
||||
// p.minticks is set by the first real event, not the first EvBatch
|
||||
x := batchts + int64(args[0])
|
||||
if x < p.minticks {
|
||||
p.minticks = x
|
||||
}
|
||||
}
|
||||
batchts += int64(args[0])
|
||||
if batchts > p.maxticks {
|
||||
p.maxticks = batchts
|
||||
}
|
||||
}
|
||||
lastEv = typ
|
||||
}
|
||||
if footerLoc <= 0 {
|
||||
return fmt.Errorf("malformed trace file, no EvFrequency")
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,165 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package filebuf implements a buffered io.ReadSeeker for OS files.
|
||||
// This is useful only for very large files with lots of
|
||||
// seeking. (otherwise use ioutil.ReadFile or bufio)
|
||||
package filebuf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Buf is the interface implemented by this package's buffered readers.
|
||||
type Buf interface {
|
||||
io.ReadCloser
|
||||
io.Seeker
|
||||
Size() int64
|
||||
Stats() Stat
|
||||
}
|
||||
|
||||
// Buflen is the size of the internal buffer.
|
||||
// The code is designed to never need to reread unnecessarily
|
||||
const Buflen = 1 << 20
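A small usage sketch of the Buf interface for an on-disk file (the file name is hypothetical and the import path is assumed):

package main

import (
	"fmt"
	"io"
	"log"

	"internal/traceparser/filebuf" // assumed import path for this package
)

func main() {
	fb, err := filebuf.New("trace.out") // hypothetical large trace file
	if err != nil {
		log.Fatal(err)
	}
	defer fb.Close()

	// Read the 16-byte trace header, then jump near the end of the file.
	// Only the 1MB window backing the current position is held in memory.
	var hdr [16]byte
	if _, err := io.ReadFull(fb, hdr[:]); err != nil {
		log.Fatal(err)
	}
	if _, err := fb.Seek(fb.Size()-16, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes, I/O stats so far: %+v\n", fb.Size(), fb.Stats())
}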
// fbuf is a buffered file with seeking.
|
||||
// fixed is an internal buffer. buf is the slice fixed[:fixedLen]. bufloc is the file
|
||||
// location of the beginning of fixed (and buf). The seek pointer is at bufloc+bufpos,
|
||||
// so the file's contents there start with buf[bufpos:]
|
||||
type fbuf struct {
|
||||
Name string
|
||||
fd *os.File
|
||||
size int64 // file size
|
||||
bufloc int64 // file loc of beginning of fixed
|
||||
bufpos int32 // seekptr is at bufloc+bufpos. bufpos <= Buflen, fixedLen
|
||||
fixed [Buflen]byte // backing store for buf
|
||||
fixedlen int // how much of fixed is valid file contents
|
||||
buf []byte // buf is fixed[0:fixedlen]
|
||||
// statistics
|
||||
seeks int // number of calls to fd.Seek
|
||||
reads int // number of calls to fd.Read
|
||||
bytes int64 // number of bytes read by fd.Read
|
||||
}
|
||||
|
||||
// Stat records the number of underlying seeks and reads, and the bytes read
|
||||
type Stat struct {
|
||||
Seeks int
|
||||
Reads int
|
||||
Bytes int64
|
||||
}
|
||||
|
||||
// Stats returns the stats so far
|
||||
func (fb *fbuf) Stats() Stat {
|
||||
return Stat{fb.seeks, fb.reads, fb.bytes}
|
||||
}
|
||||
|
||||
// Size returns the file size
|
||||
func (fb *fbuf) Size() int64 {
|
||||
return fb.size
|
||||
}
|
||||
|
||||
// New returns an initialized *fbuf or an error
|
||||
func New(fname string) (Buf, error) {
|
||||
fd, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi, err := fd.Stat()
|
||||
if err != nil || fi.Mode().IsDir() {
|
||||
return nil, fmt.Errorf("not readable: %s", fname)
|
||||
}
|
||||
return &fbuf{Name: fname, fd: fd, size: fi.Size()}, nil
|
||||
}
|
||||
|
||||
// Read implements io.Reader. It may return a positive
|
||||
// number of bytes read with io.EOF
|
||||
func (fb *fbuf) Read(p []byte) (int, error) {
|
||||
// If there are enough valid bytes remaining in buf, just use them
|
||||
if len(fb.buf[fb.bufpos:]) >= len(p) {
|
||||
copy(p, fb.buf[fb.bufpos:])
|
||||
fb.bufpos += int32(len(p))
|
||||
return len(p), nil
|
||||
}
|
||||
done := 0 // done counts how many bytes have been transferred
|
||||
// If there are any valid bytes left in buf, use them first
|
||||
if len(fb.buf[fb.bufpos:]) > 0 {
|
||||
m := copy(p, fb.buf[fb.bufpos:])
|
||||
done = m
|
||||
fb.bufpos += int32(done) // at end of the valid bytes in buf
|
||||
}
|
||||
// used up buffered data. logical seek pointer is at bufloc+bufpos.
|
||||
// loop until p has been filled up or EOF
|
||||
for done < len(p) {
|
||||
loc, err := fb.fd.Seek(0, io.SeekCurrent) // make sure of the os's file location
|
||||
if loc != fb.bufloc+int64(fb.bufpos) {
|
||||
panic(fmt.Sprintf("%v loc=%d bufloc=%d bufpos=%d", err, loc,
|
||||
fb.bufloc, fb.bufpos))
|
||||
}
|
||||
fb.seeks++ // did a file system seek
|
||||
if loc >= fb.size {
|
||||
// at EOF
|
||||
fb.bufpos = int32(len(fb.buf))
|
||||
fb.bufloc = loc - int64(fb.fixedlen)
|
||||
return done, io.EOF
|
||||
}
|
||||
n, err := fb.fd.Read(fb.fixed[:])
|
||||
if n != 0 {
|
||||
fb.fixedlen = n
|
||||
}
|
||||
fb.reads++ // did a file system read
|
||||
m := copy(p[done:], fb.fixed[:n])
|
||||
done += m
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
fb.bufpos = int32(len(fb.buf))
|
||||
fb.bufloc = loc - int64(fb.fixedlen)
|
||||
return done, io.EOF
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
fb.bytes += int64(n)
|
||||
fb.bufpos = int32(m) // used m bytes of the buffer
|
||||
fb.bufloc = loc
|
||||
fb.buf = fb.fixed[:n]
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker. (<unchanged>, io.EOF) is returned for seeks off the end.
|
||||
func (fb *fbuf) Seek(offset int64, whence int) (int64, error) {
|
||||
seekpos := offset
|
||||
switch whence {
|
||||
case io.SeekCurrent:
|
||||
seekpos += fb.bufloc + int64(fb.bufpos)
|
||||
case io.SeekEnd:
|
||||
seekpos += fb.size
|
||||
}
|
||||
if seekpos < 0 || seekpos > fb.size {
|
||||
return fb.bufloc + int64(fb.bufpos), io.EOF
|
||||
}
|
||||
// if seekpos is inside fixed, just adjust buf and bufpos
|
||||
if seekpos >= fb.bufloc && seekpos <= int64(fb.fixedlen)+fb.bufloc {
|
||||
fb.bufpos = int32(seekpos - fb.bufloc)
|
||||
return seekpos, nil
|
||||
}
|
||||
// need to refresh the internal buffer. Seek does no reading, mark buf
|
||||
// as empty, set bufpos and bufloc.
|
||||
fb.buf, fb.bufpos, fb.bufloc = nil, 0, seekpos
|
||||
n, err := fb.fd.Seek(seekpos, io.SeekStart)
|
||||
fb.seeks++
|
||||
if n != seekpos || err != nil {
|
||||
return -1, fmt.Errorf("seek failed (%d!= %d) %v", n, seekpos, err)
|
||||
}
|
||||
return seekpos, nil
|
||||
}
|
||||
|
||||
// Close closes the underlying file
|
||||
func (fb *fbuf) Close() error {
|
||||
if fb.fd != nil {
|
||||
return fb.fd.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,204 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package filebuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
inited bool
|
||||
small, large string // files
|
||||
dir string // in this dir
|
||||
contents []byte // contents of the large file
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
create()
|
||||
n := m.Run()
|
||||
|
||||
os.RemoveAll(dir)
|
||||
os.Exit(n)
|
||||
}
|
||||
|
||||
func create() {
|
||||
if inited {
|
||||
return
|
||||
}
|
||||
log.SetFlags(log.Lshortfile)
|
||||
d, erra := ioutil.TempDir("", "filebuf")
|
||||
s, errb := ioutil.TempFile(d, "small")
l, errc := ioutil.TempFile(d, "large")
|
||||
if erra != nil || errb != nil || errc != nil {
|
||||
log.Fatal(erra, errb, errc)
|
||||
}
|
||||
dir, small, large = d, s.Name(), l.Name()
|
||||
buf := make([]byte, 2*Buflen+3)
|
||||
for i := 0; i < len(buf); i++ {
|
||||
buf[i] = byte(i)
|
||||
}
|
||||
err := ioutil.WriteFile(small, buf[:7], 0666)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = ioutil.WriteFile(large, buf, 0666)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
contents = buf
|
||||
inited = true
|
||||
}
|
||||
|
||||
func get(n int) io.Reader {
|
||||
if n <= len(contents) {
|
||||
return bytes.NewReader(contents[:n])
|
||||
}
|
||||
return bytes.NewReader(contents)
|
||||
}
|
||||
|
||||
func TestSmall(t *testing.T) {
|
||||
var f Buf
|
||||
var err error
|
||||
f, err = New(small)
|
||||
small := func(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buf := make([]byte, 23)
|
||||
n, err := f.Read(buf)
|
||||
if n != 7 || err != io.EOF {
|
||||
t.Errorf("got %d, expected 7, %v", n, err)
|
||||
}
|
||||
m, err := f.Seek(0, io.SeekCurrent)
|
||||
if m != 7 || err != nil {
|
||||
t.Errorf("got %d, expected 7, %v", m, err)
|
||||
}
|
||||
m, err = f.Seek(1, io.SeekStart)
|
||||
if m != 1 || err != nil {
|
||||
t.Errorf("got %d expected 1, %v", m, err)
|
||||
}
|
||||
n, err = f.Read(buf)
|
||||
if n != 6 || err != io.EOF {
|
||||
t.Errorf("got %d, expected 6, %v", n, err)
|
||||
}
|
||||
for i := 0; i < 6; i++ {
|
||||
if buf[i] != byte(i+1) {
|
||||
t.Fatalf("byte %d is %d, not %d, %v", i, buf[i], i+1, buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
t.Run("New", small)
|
||||
f, err = FromReader(get(7))
|
||||
t.Run("Rdr", small)
|
||||
}
|
||||
|
||||
func TestLarge(t *testing.T) {
|
||||
var f Buf
|
||||
var err error
|
||||
big := func(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
x := Buflen - 7
|
||||
n, err := f.Seek(int64(x), io.SeekStart)
|
||||
if n != Buflen-7 || err != nil {
|
||||
t.Fatalf("expected %d, got %d, %v", x, n, err)
|
||||
}
|
||||
buf := make([]byte, 23)
|
||||
m, err := f.Read(buf)
|
||||
if m != len(buf) || err != nil {
|
||||
t.Fatalf("expected %d, got %d, %v", len(buf), m, err)
|
||||
}
|
||||
for i := 0; i < 23; i++ {
|
||||
if buf[i] != byte(x+i) {
|
||||
t.Fatalf("byte %d, got %d, wanted %d", i, buf[i],
|
||||
byte(x+i))
|
||||
}
|
||||
}
|
||||
m, err = f.Read(buf)
|
||||
if m != len(buf) || err != nil {
|
||||
t.Fatalf("got %d, expected %d, %v", m, len(buf), err)
|
||||
}
|
||||
x += len(buf)
|
||||
for i := 0; i < 23; i++ {
|
||||
if buf[i] != byte(x+i) {
|
||||
t.Fatalf("byte %d, got %d, wanted %d", i, buf[i],
|
||||
byte(x+i))
|
||||
}
|
||||
}
|
||||
}
|
||||
f, err = New(large)
|
||||
t.Run("New", big)
|
||||
f, err = FromReader(get(1 << 30))
|
||||
t.Run("Rdr", big)
|
||||
}
|
||||
|
||||
func TestMore(t *testing.T) {
|
||||
f, err := New(large)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var a, b [4]byte
|
||||
f.Seek(16, 0)
|
||||
f.Read(a[:])
|
||||
f.Seek(16, 0)
|
||||
f.Read(b[:])
|
||||
if a != b {
|
||||
t.Errorf("oops %v %v", a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeek(t *testing.T) {
|
||||
f, err := New(small)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
n, err := f.Seek(f.Size(), 0)
|
||||
if n != f.Size() || err != nil {
|
||||
t.Errorf("seek got %d, expected %d, %v", n, f.Size(), err)
|
||||
}
|
||||
n, err = f.Seek(1, io.SeekCurrent)
|
||||
if n != f.Size() || err != io.EOF {
|
||||
t.Errorf("n=%d, expected 0. %v", n, err)
|
||||
}
|
||||
n, err = f.Seek(f.Size(), 0)
|
||||
if n != f.Size() || err != nil {
|
||||
t.Errorf("seek got %d, expected %d, %v", n, f.Size(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReread(t *testing.T) {
|
||||
f, err := New(small)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var buf [1]byte
|
||||
f.Seek(0, 0)
|
||||
for i := 0; i < int(f.Size()); i++ {
|
||||
n, err := f.Read(buf[:])
|
||||
if n != 1 || err != nil {
|
||||
t.Fatalf("n=%d, err=%v", n, err)
|
||||
}
|
||||
}
|
||||
stats := f.Stats()
|
||||
if stats.Bytes != f.Size() || stats.Reads != 1 || stats.Seeks != 1 {
|
||||
t.Errorf("%v %d %d", stats, f.(*fbuf).bufloc, f.(*fbuf).bufpos)
|
||||
}
|
||||
n, err := f.Read(buf[:])
|
||||
if n != 0 || err != io.EOF {
|
||||
t.Fatalf("expected 0 and io.EOF, got %d %v", n, err)
|
||||
}
|
||||
f.Seek(0, 0)
|
||||
xstats := f.Stats()
|
||||
if xstats.Bytes != f.Size() || xstats.Reads != 1 || xstats.Seeks != 2 {
|
||||
t.Errorf("%v %v %d %d", stats, xstats, f.(*fbuf).bufloc, f.(*fbuf).bufpos)
|
||||
}
|
||||
f.Close()
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package filebuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// implement a Buf version from an io.Reader
|
||||
|
||||
type rbuf struct {
|
||||
buf []byte // contents
|
||||
pos int64
|
||||
seeks, reads int // number of calls. 0 seems right.
|
||||
}
|
||||
|
||||
func (r *rbuf) Stats() Stat {
|
||||
return Stat{r.seeks, r.reads, int64(len(r.buf))}
|
||||
}
|
||||
|
||||
func (r *rbuf) Size() int64 {
|
||||
return int64(len(r.buf))
|
||||
}
|
||||
|
||||
// FromReader creates a Buf by copying the contents of an io.Reader
|
||||
func FromReader(rd io.Reader) (Buf, error) {
|
||||
r := &rbuf{}
|
||||
x := bytes.NewBuffer(r.buf)
|
||||
_, err := io.Copy(x, rd)
|
||||
r.buf = x.Bytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *rbuf) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *rbuf) Read(p []byte) (int, error) {
|
||||
n := copy(p, r.buf[r.pos:])
|
||||
r.pos += int64(n)
|
||||
if n == 0 || n < len(p) {
|
||||
return n, io.EOF
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (r *rbuf) Seek(offset int64, whence int) (int64, error) {
|
||||
seekpos := offset
|
||||
switch whence {
|
||||
case io.SeekCurrent:
|
||||
seekpos += int64(r.pos)
|
||||
case io.SeekEnd:
|
||||
seekpos += int64(len(r.buf))
|
||||
}
|
||||
if seekpos < 0 || seekpos > int64(len(r.buf)) {
|
||||
if seekpos < 0 {
|
||||
r.pos = 0
|
||||
return 0, nil
|
||||
}
|
||||
r.pos = int64(len(r.buf))
|
||||
return r.pos, nil
|
||||
}
|
||||
r.pos = seekpos
|
||||
return seekpos, nil
|
||||
}
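FromReader gives the same Buf interface when the trace bytes are already in memory or come from something other than a file. A sketch, with an assumed import path and made-up contents:

package main

import (
	"bytes"
	"fmt"
	"log"

	"internal/traceparser/filebuf" // assumed import path
)

func main() {
	raw := []byte("go 1.11 trace\x00\x00\x00") // hypothetical in-memory trace header
	buf, err := filebuf.FromReader(bytes.NewReader(raw))
	if err != nil {
		log.Fatal(err)
	}
	defer buf.Close()
	// buf satisfies the same Buf interface that filebuf.New returns for files.
	fmt.Println(buf.Size()) // 16
}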
@ -1,49 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
package traceparser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// At first we ran the old parser and returned 0 if it failed, on the theory that we didn't need
// to do better. But that leads to very few crashes to look at.
// It may be better just to make sure the new parser doesn't misbehave, and that when it reports
// no error, the old parser gets the same results (up to some tolerance).
|
||||
// perhaps even better would be to seed corpus with examples from which the 16-byte header
|
||||
// has been stripped, and add it in Fuzz, so the fuzzer doesn't spend a lot of time making
|
||||
// changes we reject in the header. (this may not be necessary)
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
if len(data) < 16 {
|
||||
return 0
|
||||
}
|
||||
switch x := string(data[:16]); x {
|
||||
default:
|
||||
return 0
|
||||
case "go 1.9 trace\000\000\000\000":
|
||||
break
|
||||
case "go 1.10 trace\000\000\000":
|
||||
break
|
||||
case "go 1.11 trace\000\000\000":
|
||||
break
|
||||
}
|
||||
p, errp := ParseBuffer(bytes.NewBuffer(data))
|
||||
if errp != nil {
|
||||
if p != nil {
|
||||
panic(fmt.Sprintf("p not nil on error %s", errp))
|
||||
}
|
||||
}
|
||||
// TODO(pjw): if no errors, compare parses?
|
||||
return 1
|
||||
}
|
||||
|
||||
func init() {
|
||||
log.SetFlags(log.Lshortfile)
|
||||
}
|
@ -1,341 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
import "sort"
|
||||
|
||||
// GDesc contains statistics and execution details of a single goroutine.
|
||||
type GDesc struct {
|
||||
ID uint64
|
||||
Name string
|
||||
PC uint64
|
||||
CreationTime int64
|
||||
StartTime int64
|
||||
EndTime int64
|
||||
|
||||
// List of regions in the goroutine, sorted based on the start time.
|
||||
Regions []*UserRegionDesc
|
||||
|
||||
// Statistics of execution time during the goroutine execution.
|
||||
GExecutionStat
|
||||
|
||||
*gDesc // private part.
|
||||
}
|
||||
|
||||
// UserRegionDesc represents a region and goroutine execution stats
|
||||
// while the region was active.
|
||||
type UserRegionDesc struct {
|
||||
TaskID uint64
|
||||
Name string
|
||||
|
||||
// Region start event. Normally EvUserRegion start event or nil,
|
||||
// but can be EvGoCreate event if the region is a synthetic
|
||||
// region representing task inheritance from the parent goroutine.
|
||||
Start *Event
|
||||
|
||||
// Region end event. Normally EvUserRegion end event or nil,
|
||||
// but can be EvGoStop or EvGoEnd event if the goroutine
|
||||
// terminated without explicitly ending the region.
|
||||
End *Event
|
||||
|
||||
GExecutionStat
|
||||
}
|
||||
|
||||
// GExecutionStat contains statistics about a goroutine's execution
|
||||
// during a period of time.
|
||||
type GExecutionStat struct {
|
||||
ExecTime int64
|
||||
SchedWaitTime int64
|
||||
IOTime int64
|
||||
BlockTime int64
|
||||
SyscallTime int64
|
||||
GCTime int64
|
||||
SweepTime int64
|
||||
TotalTime int64
|
||||
}
|
||||
|
||||
// sub returns the stats v-s.
|
||||
func (s GExecutionStat) sub(v GExecutionStat) (r GExecutionStat) {
|
||||
r = s
|
||||
r.ExecTime -= v.ExecTime
|
||||
r.SchedWaitTime -= v.SchedWaitTime
|
||||
r.IOTime -= v.IOTime
|
||||
r.BlockTime -= v.BlockTime
|
||||
r.SyscallTime -= v.SyscallTime
|
||||
r.GCTime -= v.GCTime
|
||||
r.SweepTime -= v.SweepTime
|
||||
r.TotalTime -= v.TotalTime
|
||||
return r
|
||||
}
|
||||
|
||||
// snapshotStat returns the snapshot of the goroutine execution statistics.
|
||||
// This is called as we process the ordered trace event stream. lastTs and
|
||||
// activeGCStartTime are used to process pending statistics if this is called
|
||||
// before any goroutine end event.
|
||||
func (g *GDesc) snapshotStat(lastTs, activeGCStartTime int64) (ret GExecutionStat) {
|
||||
ret = g.GExecutionStat
|
||||
|
||||
if g.gDesc == nil {
|
||||
return ret // finalized GDesc. No pending state.
|
||||
}
|
||||
|
||||
if activeGCStartTime != 0 { // terminating while GC is active
|
||||
if g.CreationTime < activeGCStartTime {
|
||||
ret.GCTime += lastTs - activeGCStartTime
|
||||
} else {
|
||||
// The goroutine's lifetime completely overlaps
|
||||
// with a GC.
|
||||
ret.GCTime += lastTs - g.CreationTime
|
||||
}
|
||||
}
|
||||
|
||||
if g.TotalTime == 0 {
|
||||
ret.TotalTime = lastTs - g.CreationTime
|
||||
}
|
||||
|
||||
if g.lastStartTime != 0 {
|
||||
ret.ExecTime += lastTs - g.lastStartTime
|
||||
}
|
||||
if g.blockNetTime != 0 {
|
||||
ret.IOTime += lastTs - g.blockNetTime
|
||||
}
|
||||
if g.blockSyncTime != 0 {
|
||||
ret.BlockTime += lastTs - g.blockSyncTime
|
||||
}
|
||||
if g.blockSyscallTime != 0 {
|
||||
ret.SyscallTime += lastTs - g.blockSyscallTime
|
||||
}
|
||||
if g.blockSchedTime != 0 {
|
||||
ret.SchedWaitTime += lastTs - g.blockSchedTime
|
||||
}
|
||||
if g.blockSweepTime != 0 {
|
||||
ret.SweepTime += lastTs - g.blockSweepTime
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// finalize is called when processing a goroutine end event or at
|
||||
// the end of trace processing. This finalizes the execution stat
|
||||
// and any active regions in the goroutine, in which case trigger is nil.
|
||||
func (g *GDesc) finalize(lastTs, activeGCStartTime int64, trigger *Event) {
|
||||
if trigger != nil {
|
||||
g.EndTime = trigger.Ts
|
||||
}
|
||||
finalStat := g.snapshotStat(lastTs, activeGCStartTime)
|
||||
|
||||
g.GExecutionStat = finalStat
|
||||
for _, s := range g.activeRegions {
|
||||
s.End = trigger
|
||||
s.GExecutionStat = finalStat.sub(s.GExecutionStat)
|
||||
g.Regions = append(g.Regions, s)
|
||||
}
|
||||
*(g.gDesc) = gDesc{}
|
||||
}
|
||||
|
||||
// gDesc is a private part of GDesc that is required only during analysis.
|
||||
type gDesc struct {
|
||||
lastStartTime int64
|
||||
blockNetTime int64
|
||||
blockSyncTime int64
|
||||
blockSyscallTime int64
|
||||
blockSweepTime int64
|
||||
blockGCTime int64
|
||||
blockSchedTime int64
|
||||
|
||||
activeRegions []*UserRegionDesc // stack of active regions
|
||||
}
|
||||
|
||||
// GoroutineStats generates statistics for all goroutines in the trace segment.
|
||||
func (p *Parsed) GoroutineStats() map[uint64]*GDesc {
|
||||
events := p.Events
|
||||
gs := make(map[uint64]*GDesc)
|
||||
var lastTs int64
|
||||
var gcStartTime int64 // gcStartTime == 0 indicates gc is inactive.
|
||||
for _, ev := range events {
|
||||
lastTs = ev.Ts
|
||||
switch ev.Type {
|
||||
case EvGoCreate:
|
||||
g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gDesc: new(gDesc)}
|
||||
g.blockSchedTime = ev.Ts
|
||||
// When a goroutine is newly created, inherit the
|
||||
// task of the active region. For ease handling of
|
||||
// this case, we create a fake region description with
|
||||
// the task id.
|
||||
if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gDesc.activeRegions) > 0 {
|
||||
regions := creatorG.gDesc.activeRegions
|
||||
s := regions[len(regions)-1]
|
||||
if s.TaskID != 0 {
|
||||
g.gDesc.activeRegions = []*UserRegionDesc{
|
||||
{TaskID: s.TaskID, Start: ev},
|
||||
}
|
||||
}
|
||||
}
|
||||
gs[g.ID] = g
|
||||
case EvGoStart, EvGoStartLabel:
|
||||
g := gs[ev.G]
|
||||
if g.PC == 0 {
|
||||
stk := p.Stacks[ev.StkID]
|
||||
g.PC = stk[0].PC
|
||||
g.Name = stk[0].Fn
|
||||
}
|
||||
g.lastStartTime = ev.Ts
|
||||
if g.StartTime == 0 {
|
||||
g.StartTime = ev.Ts
|
||||
}
|
||||
if g.blockSchedTime != 0 {
|
||||
g.SchedWaitTime += ev.Ts - g.blockSchedTime
|
||||
g.blockSchedTime = 0
|
||||
}
|
||||
case EvGoEnd, EvGoStop:
|
||||
g := gs[ev.G]
|
||||
g.finalize(ev.Ts, gcStartTime, ev)
|
||||
case EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
|
||||
EvGoBlockSync, EvGoBlockCond:
|
||||
g := gs[ev.G]
|
||||
g.ExecTime += ev.Ts - g.lastStartTime
|
||||
g.lastStartTime = 0
|
||||
g.blockSyncTime = ev.Ts
|
||||
case EvGoSched, EvGoPreempt:
|
||||
g := gs[ev.G]
|
||||
g.ExecTime += ev.Ts - g.lastStartTime
|
||||
g.lastStartTime = 0
|
||||
g.blockSchedTime = ev.Ts
|
||||
case EvGoSleep, EvGoBlock:
|
||||
g := gs[ev.G]
|
||||
g.ExecTime += ev.Ts - g.lastStartTime
|
||||
g.lastStartTime = 0
|
||||
case EvGoBlockNet:
|
||||
g := gs[ev.G]
|
||||
g.ExecTime += ev.Ts - g.lastStartTime
|
||||
g.lastStartTime = 0
|
||||
g.blockNetTime = ev.Ts
|
||||
case EvGoBlockGC:
|
||||
g := gs[ev.G]
|
||||
g.ExecTime += ev.Ts - g.lastStartTime
|
||||
g.lastStartTime = 0
|
||||
g.blockGCTime = ev.Ts
|
||||
case EvGoUnblock:
|
||||
g := gs[ev.Args[0]]
|
||||
if g.blockNetTime != 0 {
|
||||
g.IOTime += ev.Ts - g.blockNetTime
|
||||
g.blockNetTime = 0
|
||||
}
|
||||
if g.blockSyncTime != 0 {
|
||||
g.BlockTime += ev.Ts - g.blockSyncTime
|
||||
g.blockSyncTime = 0
|
||||
}
|
||||
g.blockSchedTime = ev.Ts
|
||||
case EvGoSysBlock:
|
||||
g := gs[ev.G]
|
||||
g.ExecTime += ev.Ts - g.lastStartTime
|
||||
g.lastStartTime = 0
|
||||
g.blockSyscallTime = ev.Ts
|
||||
case EvGoSysExit:
|
||||
g := gs[ev.G]
|
||||
if g.blockSyscallTime != 0 {
|
||||
g.SyscallTime += ev.Ts - g.blockSyscallTime
|
||||
g.blockSyscallTime = 0
|
||||
}
|
||||
g.blockSchedTime = ev.Ts
|
||||
case EvGCSweepStart:
|
||||
g := gs[ev.G]
|
||||
if g != nil {
|
||||
// Sweep can happen during GC on system goroutine.
|
||||
g.blockSweepTime = ev.Ts
|
||||
}
|
||||
case EvGCSweepDone:
|
||||
g := gs[ev.G]
|
||||
if g != nil && g.blockSweepTime != 0 {
|
||||
g.SweepTime += ev.Ts - g.blockSweepTime
|
||||
g.blockSweepTime = 0
|
||||
}
|
||||
case EvGCStart:
|
||||
gcStartTime = ev.Ts
|
||||
case EvGCDone:
|
||||
for _, g := range gs {
|
||||
if g.EndTime != 0 {
|
||||
continue
|
||||
}
|
||||
if gcStartTime < g.CreationTime {
|
||||
g.GCTime += ev.Ts - g.CreationTime
|
||||
} else {
|
||||
g.GCTime += ev.Ts - gcStartTime
|
||||
}
|
||||
}
|
||||
gcStartTime = 0 // indicates gc is inactive.
|
||||
case EvUserRegion:
|
||||
g := gs[ev.G]
|
||||
switch mode := ev.Args[1]; mode {
|
||||
case 0: // region start
|
||||
g.activeRegions = append(g.activeRegions, &UserRegionDesc{
|
||||
Name: ev.SArgs[0],
|
||||
TaskID: ev.Args[0],
|
||||
Start: ev,
|
||||
GExecutionStat: g.snapshotStat(lastTs, gcStartTime),
|
||||
})
|
||||
case 1: // region end
|
||||
var sd *UserRegionDesc
|
||||
if regionStk := g.activeRegions; len(regionStk) > 0 {
|
||||
n := len(regionStk)
|
||||
sd = regionStk[n-1]
|
||||
regionStk = regionStk[:n-1] // pop
|
||||
g.activeRegions = regionStk
|
||||
} else {
|
||||
sd = &UserRegionDesc{
|
||||
Name: ev.SArgs[0],
|
||||
TaskID: ev.Args[0],
|
||||
}
|
||||
}
|
||||
sd.GExecutionStat = g.snapshotStat(lastTs, gcStartTime).sub(sd.GExecutionStat)
|
||||
sd.End = ev
|
||||
g.Regions = append(g.Regions, sd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, g := range gs {
|
||||
g.finalize(lastTs, gcStartTime, nil)
|
||||
|
||||
// sort based on region start time
|
||||
sort.Slice(g.Regions, func(i, j int) bool {
|
||||
x := g.Regions[i].Start
|
||||
y := g.Regions[j].Start
|
||||
if x == nil {
|
||||
return true
|
||||
}
|
||||
if y == nil {
|
||||
return false
|
||||
}
|
||||
return x.Ts < y.Ts
|
||||
})
|
||||
|
||||
g.gDesc = nil
|
||||
}
|
||||
|
||||
return gs
|
||||
}
|
||||
|
||||
// RelatedGoroutines finds a set of goroutines related to goroutine goid.
|
||||
func (p *Parsed) RelatedGoroutines(goid uint64) map[uint64]bool {
|
||||
events := p.Events
|
||||
// BFS of depth 2 over "unblock" edges
|
||||
// (what goroutines unblock goroutine goid?).
|
||||
gmap := make(map[uint64]bool)
|
||||
gmap[goid] = true
|
||||
for i := 0; i < 2; i++ {
|
||||
gmap1 := make(map[uint64]bool)
|
||||
for g := range gmap {
|
||||
gmap1[g] = true
|
||||
}
|
||||
for _, ev := range events {
|
||||
if ev.Type == EvGoUnblock && gmap[ev.Args[0]] {
|
||||
gmap1[ev.G] = true
|
||||
}
|
||||
}
|
||||
gmap = gmap1
|
||||
}
|
||||
gmap[0] = true // for GC events
|
||||
return gmap
|
||||
}
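A sketch of how a caller might consume these two entry points once a trace segment has been parsed (the helper and its package are illustrative, and the import path is assumed):

package tracetool

import (
	"fmt"

	"internal/traceparser" // assumed path; only importable from inside the Go tree
)

// summarize prints per-goroutine execution statistics and the set of
// goroutines related to goid, using GoroutineStats and RelatedGoroutines.
func summarize(p *traceparser.Parsed, goid uint64) {
	for id, g := range p.GoroutineStats() {
		fmt.Printf("g%-6d %-24s exec=%dns total=%dns\n", id, g.Name, g.ExecTime, g.TotalTime)
	}
	for id := range p.RelatedGoroutines(goid) {
		fmt.Printf("g%d is related to g%d\n", id, goid)
	}
}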
@ -1,111 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
// testfiles from the old trace parser
|
||||
otherDir = "../trace/testdata/"
|
||||
want = map[string]bool{"http_1_9_good": true, "http_1_10_good": true, "http_1_11_good": true,
|
||||
"stress_1_9_good": true, "stress_1_10_good": true, "stress_1_11_good": true,
|
||||
"stress_start_stop_1_9_good": true, "stress_start_stop_1_10_good": true,
|
||||
"stress_start_stop_1_11_good": true, "user_task_span_1_11_good": true,
|
||||
|
||||
"http_1_5_good": false, "http_1_7_good": false,
|
||||
"stress_1_5_good": false, "stress_1_5_unordered": false, "stress_1_7_good": false,
|
||||
"stress_start_stop_1_5_good": false, "stress_start_stop_1_7_good": false,
|
||||
}
|
||||
)
|
||||
|
||||
func TestRemoteFiles(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
|
||||
t.Skipf("files from outside the package are not available on %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
files, err := ioutil.ReadDir(otherDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, f := range files {
|
||||
fname := filepath.Join(otherDir, f.Name())
|
||||
p, err := New(fname)
|
||||
if err == nil {
|
||||
err = p.Parse(0, 1<<62, nil)
|
||||
}
|
||||
if err == nil != want[f.Name()] {
|
||||
t.Errorf("%s: got %v expected %v, err=%v",
|
||||
f.Name(), err == nil, want[f.Name()], err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalFiles(t *testing.T) {
|
||||
|
||||
files, err := ioutil.ReadDir("./testdata")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read ./testdata: %v", err)
|
||||
}
|
||||
for _, f := range files {
|
||||
fname := filepath.Join("./testdata", f.Name())
|
||||
p, err := New(fname)
|
||||
if err == nil {
|
||||
err = p.Parse(0, 1<<62, nil)
|
||||
}
|
||||
switch {
|
||||
case strings.Contains(f.Name(), "good"),
|
||||
strings.Contains(f.Name(), "weird"):
|
||||
if err != nil {
|
||||
t.Errorf("unexpected failure %v %s", err, f.Name())
|
||||
}
|
||||
case strings.Contains(f.Name(), "bad"):
|
||||
if err == nil {
|
||||
t.Errorf("bad file did not fail %s", f.Name())
|
||||
}
|
||||
default:
|
||||
t.Errorf("untyped file %v %s", err, f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStats(t *testing.T) {
|
||||
// Need just one good file to see that OSStats works properly.
|
||||
files, err := ioutil.ReadDir("./testdata")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, f := range files {
|
||||
if !strings.HasPrefix(f.Name(), "good") {
|
||||
continue
|
||||
}
|
||||
fname := filepath.Join("./testdata", f.Name())
|
||||
p, err := New(fname)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stat := p.OSStats()
|
||||
if stat.Bytes == 0 || stat.Seeks == 0 || stat.Reads == 0 {
|
||||
t.Errorf("OSStats impossible %v", stat)
|
||||
}
|
||||
fd, err := os.Open(fname)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pb, err := ParseBuffer(fd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stat = pb.OSStats()
|
||||
if stat.Seeks != 0 || stat.Reads != 0 {
|
||||
t.Errorf("unexpected positive results %v", stat)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,106 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
// convert batches into their raw events. For small intervals (1 or 10 seconds)
|
||||
// this takes about 40% of the total Parse time.
|
||||
|
||||
func (p *Parsed) batchify(b *batch) error {
|
||||
evs := make([]rawEvent, 0)
|
||||
p.seenArgs = make(map[uint64]*[]uint64)
|
||||
hasher := fnv.New64()
|
||||
r := p.r
|
||||
r.Seek(int64(b.Off), 0)
|
||||
var buf [1]byte
|
||||
seenBatch := false // to terminate the loop on the second EvBatch
|
||||
|
||||
for off := b.Off; ; {
|
||||
off0 := off // remember the beginning of the event
|
||||
n, err := r.Read(buf[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
off += n
|
||||
typ := buf[0] << 2 >> 2 // event type is bottom 6 bits
|
||||
if typ == EvFrequency || (typ == EvBatch && seenBatch) {
|
||||
break // found trailer, or next batch
|
||||
}
|
||||
if typ == EvBatch {
|
||||
seenBatch = true
|
||||
}
|
||||
if typ == EvString {
|
||||
// skip over it. error checking was done in file.go
|
||||
_, off, _ = readVal(r, off)
|
||||
var ln uint64
|
||||
ln, off, _ = readVal(r, off)
|
||||
// PJW: why not just seek ahead ln bytes?
|
||||
if false {
|
||||
buf := make([]byte, ln)
|
||||
var n int
|
||||
n, _ = io.ReadFull(r, buf)
|
||||
off += n
|
||||
} else {
|
||||
n, _ := r.Seek(int64(ln), 1)
|
||||
off = int(n)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// build the raw event and collect its arguments
|
||||
ev := rawEvent{typ: typ, off: uint32(off0 - b.Off)}
|
||||
var args []uint64
|
||||
off, args, err = p.argsAt(off0, typ)
|
||||
if err != nil {
|
||||
// PJW: make sure this is useful
|
||||
return fmt.Errorf("parsing %s failed at P=%d off=%d %v", evname(typ),
|
||||
b.P, off0, err)
|
||||
}
|
||||
|
||||
// have we seen the args before?
|
||||
if len(args) > 0 {
|
||||
ev.arg0 = args[0]
|
||||
if len(args) > 1 {
|
||||
hasher.Reset()
|
||||
for i := 1; i < len(args); i++ {
|
||||
var x [8]byte
|
||||
binary.LittleEndian.PutUint64(x[:], args[i])
|
||||
_, err := hasher.Write(x[:])
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
hc := hasher.Sum64()
|
||||
old, ok := p.seenArgs[hc]
|
||||
if !ok {
|
||||
final := make([]uint64, len(args)-1)
|
||||
copy(final, args[1:])
|
||||
p.seenArgs[hc] = &final
|
||||
} else {
|
||||
// is this a collision? PJW: make this precisely right
|
||||
if len(*old) != len(args[1:]) {
|
||||
log.Fatalf("COLLISION old:%v this:%v", *old, args[1:])
|
||||
}
|
||||
}
|
||||
ev.args = p.seenArgs[hc]
|
||||
}
|
||||
}
|
||||
if typ == EvUserLog {
|
||||
// argsAt didn't read the string argument
|
||||
var s string
|
||||
s, off, err = readStr(r, off)
|
||||
ev.sarg = s
|
||||
}
|
||||
evs = append(evs, ev)
|
||||
}
|
||||
b.raws = evs
|
||||
return nil
|
||||
}
|
@ -1,585 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
// there are panics for impossible situations. probably an error would be better
|
||||
// (if only it were certain these are impossible)
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// repair an incomplete or possibly damaged interval of Events
|
||||
// so that postProcess is happy
|
||||
|
||||
// errors returned by checkRunning()
|
||||
const (
|
||||
ok = 0
|
||||
badRunning = 1 << iota
|
||||
badP
|
||||
badG0
|
||||
)
|
||||
|
||||
// states of g's and p's
|
||||
type gdesc struct {
|
||||
state gStatus
|
||||
ev, evStart, evCreate, evMarkAssist *Event
|
||||
}
|
||||
|
||||
type pdesc struct {
|
||||
running bool
|
||||
g uint64
|
||||
evSTW, evSweep *Event
|
||||
}
|
||||
|
||||
func locstr(ev *Event) string {
|
||||
if ev == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("%s:%x", evname(ev.Type), ev.Ts)
|
||||
}
|
||||
func (p pdesc) String() string {
|
||||
return fmt.Sprintf("[%v %d %s %s]", p.running, p.g, locstr(p.evSTW), locstr(p.evSweep))
|
||||
}
|
||||
|
||||
func (g gdesc) String() string {
|
||||
var nm string
|
||||
switch g.state {
|
||||
case gDead:
|
||||
nm = "dead"
|
||||
case gWaiting:
|
||||
nm = "waiting"
|
||||
case gRunnable:
|
||||
nm = "runnable"
|
||||
case gRunning:
|
||||
nm = "running"
|
||||
}
|
||||
f := locstr
|
||||
return fmt.Sprintf("[%s %s,%s,%s,%s]", nm, f(g.ev), f(g.evStart),
|
||||
f(g.evCreate), f(g.evMarkAssist))
|
||||
}
|
||||
|
||||
func checkRunning(pd pdesc, gd gdesc, ev *Event, okG0 bool) int {
|
||||
ret := ok
|
||||
if gd.state != gRunning {
|
||||
ret |= badRunning
|
||||
}
|
||||
if pd.g != ev.G {
|
||||
ret |= badP
|
||||
}
|
||||
if !okG0 && ev.G == 0 {
|
||||
ret |= badG0
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type aux struct {
|
||||
pref []*Event // prefix
|
||||
evs []*Event // copies and inserted
|
||||
deleted map[byte]int // count by Type
|
||||
inserted map[byte]int // count by Type
|
||||
gs map[uint64]gdesc
|
||||
ps map[int32]pdesc
|
||||
g gdesc
|
||||
px pdesc
|
||||
my *Parsed
|
||||
input []*Event // events in call to robust()
|
||||
last int // last index handled by reorder
|
||||
err error // report inconsistent trace files
|
||||
}
|
||||
|
||||
func (a *aux) preftime() int64 {
|
||||
ts := a.my.MinWant - 1000
|
||||
if ts < 0 {
|
||||
ts = 0
|
||||
}
|
||||
if len(a.pref) > 0 {
|
||||
ts = a.pref[len(a.pref)-1].Ts + 1
|
||||
}
|
||||
return ts
|
||||
}
|
||||
func (a *aux) delete(i int, ev *Event) {
|
||||
a.deleted[ev.Type]++
|
||||
}
|
||||
func (a *aux) prefix(typ byte, g uint64, p int32) {
|
||||
ts := a.preftime()
|
||||
ev := &Event{Type: typ, G: g, P: p, Ts: ts,
|
||||
Args: [3]uint64{0, 0, 1}}
|
||||
a.pref = append(a.pref, ev)
|
||||
}
|
||||
func (a *aux) procstart(p int32) {
|
||||
if p >= FakeP || a.px.running {
|
||||
return
|
||||
}
|
||||
a.prefix(EvProcStart, 0, p)
|
||||
a.px.running = true
|
||||
}
|
||||
func (a *aux) makewaiting(i int, g uint64, typ byte) {
|
||||
// GoCreate, g=0 args[0]=g; maybe it exists already?
|
||||
// GoWaiting or GoInSysCall
|
||||
p := int32(a.my.batches[0].P)
|
||||
ev := &Event{Type: EvGoCreate, P: p,
|
||||
Ts: a.preftime(), Args: [3]uint64{g, 0, 2}}
|
||||
a.pref = append(a.pref, ev)
|
||||
a.gs[g] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
|
||||
ev = &Event{Type: typ, G: g, P: p,
|
||||
Ts: a.preftime(), Args: [3]uint64{g, 0, 3}}
|
||||
a.pref = append(a.pref, ev)
|
||||
switch typ {
|
||||
default:
|
||||
panic(fmt.Sprintf("weird typ %s in makewaiting", evname(typ)))
|
||||
case EvGoWaiting, EvGoInSyscall:
|
||||
// ok
|
||||
}
|
||||
}
|
||||
|
||||
func (a *aux) makerunnable(i int, ev *Event) {
|
||||
// Create, Sched, Preempt, or Unblock
|
||||
switch a.gs[ev.G].state {
|
||||
case gDead:
|
||||
p := int32(a.my.batches[0].P)
|
||||
ev := &Event{Type: EvGoCreate, P: p,
|
||||
Ts: a.preftime(), Args: [3]uint64{ev.G, 0, 4}}
|
||||
a.pref = append(a.pref, ev)
|
||||
a.gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
|
||||
case gRunnable:
|
||||
return
|
||||
case gRunning:
|
||||
//a.prevs(i)
|
||||
a.err = fmt.Errorf("gRunning %d:%s", i, ev)
|
||||
case gWaiting:
|
||||
//a.prevs(i)
|
||||
a.err = fmt.Errorf("no consistent ordering possible %d:%s", i, ev)
|
||||
}
|
||||
}
|
||||
func (a *aux) makerunning(i int, ev *Event) {
|
||||
// GoStart once it is runnable
|
||||
switch a.g.state {
|
||||
case gDead:
|
||||
a.makerunnable(i, ev)
|
||||
case gRunnable:
|
||||
break
|
||||
case gRunning:
|
||||
return
|
||||
case gWaiting:
|
||||
a.err = fmt.Errorf("gWaiting in makerunnable %d:%s %+v", i, ev, a.g)
|
||||
}
|
||||
// PJW: which P? Probably need a ProcStart once
|
||||
if !a.px.running {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
p := ev.P
|
||||
if p == TimerP {
|
||||
p = int32(ev.Args[2]) // from events.go:71
|
||||
ev.Args[2] = 0
|
||||
}
|
||||
x := &Event{Type: EvGoStart, G: ev.G, P: p, Args: [3]uint64{ev.G, 0, 5}}
|
||||
x.Ts = ev.Ts - 1
|
||||
a.evs = append(a.evs, x)
|
||||
a.g.state = gRunning
|
||||
a.g.evStart = x
|
||||
a.px.g = x.G
|
||||
a.inserted[EvGoStart]++
|
||||
}
|
||||
|
||||
func (p *Parsed) robust(events []*Event) ([]*Event, *aux, error) { // *aux for debugging (CheckRobust)
|
||||
a := new(aux)
|
||||
a.gs = make(map[uint64]gdesc)
|
||||
a.ps = make(map[int32]pdesc)
|
||||
var evGC, evSTW *Event
|
||||
tasks := make(map[uint64]*Event) // task id to create
|
||||
activeSpans := make(map[uint64][]*Event)
|
||||
a.gs[0] = gdesc{state: gRunning} // bootstrap
|
||||
a.deleted = make(map[byte]int)
|
||||
a.inserted = make(map[byte]int)
|
||||
a.my = p
|
||||
a.input = events
|
||||
|
||||
for i, ev := range events {
|
||||
if a.err != nil {
|
||||
break
|
||||
}
|
||||
if i < len(events)-1 && ev.Ts == events[i+1].Ts &&
|
||||
i > a.last {
|
||||
// sigh. dragonfly, or similar trouble.
|
||||
// a.last is to avoid overlapping calls
|
||||
// This is a placeholder if needed.
|
||||
//a.reorder(i, events)
|
||||
ev = events[i]
|
||||
}
|
||||
var gok, pok bool
|
||||
a.g, gok = a.gs[ev.G]
|
||||
a.px, pok = a.ps[ev.P]
|
||||
switch ev.Type {
|
||||
case EvProcStart:
|
||||
if a.px.running { // This doesn't happen, but to be safe
|
||||
a.delete(i, ev) // already started
|
||||
continue
|
||||
}
|
||||
a.px.running = true
|
||||
case EvProcStop:
|
||||
if !pok { // Ok to delete, as we've never heard of it
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
if !a.px.running {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
if a.px.g != 0 {
|
||||
// p is running a g! Stop the g? Ignore the Stop?
|
||||
// Ignore the Stop. I don't think this happens.
|
||||
// (unless there are equal Ts's or the file is corrupt)
|
||||
a.err = fmt.Errorf("unexpected %d:%s %v", i, ev, a.px)
|
||||
// a.delete(i, ev) // PJW
|
||||
continue
|
||||
}
|
||||
a.px.running = false
|
||||
case EvGCStart:
|
||||
if evGC != nil {
|
||||
// already running; doesn't happen
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
evGC = ev
|
||||
case EvGCDone:
|
||||
if evGC == nil {
|
||||
// no GCStart to link it to: choice is lying about
|
||||
// the duration or the existence. Do the latter
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
evGC = nil
|
||||
case EvGCSTWStart:
|
||||
evp := &evSTW
|
||||
if p.Version < 1010 {
|
||||
// Before 1.10, EvGCSTWStart was per-P.
|
||||
evp = &a.px.evSTW
|
||||
}
|
||||
if *evp != nil {
|
||||
// still running; doesn't happen
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
*evp = ev
|
||||
case EvGCSTWDone:
|
||||
evp := &evSTW
|
||||
if p.Version < 1010 {
|
||||
// Before 1.10, EvGCSTWDone was per-P.
|
||||
evp = &a.px.evSTW
|
||||
}
|
||||
if *evp == nil {
|
||||
// no STWStart to link to: choice is lying about
|
||||
// duration or the existence. Do the latter.
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
*evp = nil
|
||||
case EvGCMarkAssistStart:
|
||||
if a.g.evMarkAssist != nil {
|
||||
// already running; doesn't happen
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
a.g.evMarkAssist = ev
|
||||
case EvGCMarkAssistDone:
|
||||
// ok to be in progress
|
||||
a.g.evMarkAssist = nil
|
||||
case EvGCSweepStart:
|
||||
if a.px.evSweep != nil {
|
||||
// older one still running; doesn't happen
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
a.px.evSweep = ev
|
||||
case EvGCSweepDone:
|
||||
if a.px.evSweep == nil {
|
||||
// no Start to link to: choice is lying about
|
||||
// duration or existence. Do the latter.
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
a.px.evSweep = nil
|
||||
case EvGoWaiting:
|
||||
if a.g.state != gRunnable {
|
||||
a.makerunnable(i, ev)
|
||||
}
|
||||
a.g.state = gWaiting
|
||||
a.g.ev = ev
|
||||
case EvGoInSyscall: // PJW: same as GoWaiting
|
||||
if a.g.state != gRunnable {
|
||||
a.makerunnable(i, ev)
|
||||
}
|
||||
a.g.state = gWaiting
|
||||
a.g.ev = ev
|
||||
case EvGoCreate:
|
||||
if _, ok := a.gs[ev.Args[0]]; ok {
|
||||
// this g already exists; doesn't happen
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
ret := checkRunning(a.px, a.g, ev, true)
|
||||
if ret&badRunning != 0 {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ret&badP != 0 {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
a.gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev,
|
||||
evCreate: ev}
|
||||
case EvGoStart, EvGoStartLabel:
|
||||
if a.g.state != gRunnable {
|
||||
a.makerunnable(i, ev)
|
||||
}
|
||||
if a.px.g != 0 {
|
||||
//a.prevs(i)
|
||||
a.err = fmt.Errorf("p already running %d, %d:%s",
|
||||
a.px.g, i, ev)
|
||||
}
|
||||
a.g.state = gRunning
|
||||
a.g.evStart = ev // PJW: do we need g.evStart?
|
||||
a.px.g = ev.G
|
||||
a.g.evCreate = nil // PJW: do we need g.evCreate?
|
||||
case EvGoEnd, EvGoStop:
|
||||
if !gok {
|
||||
// never heard of it; act as if it never existed
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
ret := checkRunning(a.px, a.g, ev, false)
|
||||
if ret&badRunning != 0 {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ret&badP != 0 {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
if ret&badG0 != 0 {
|
||||
// gok should have been false
|
||||
panic(fmt.Sprintf("badG0 %d:%s", i, ev))
|
||||
}
|
||||
a.g.evStart = nil
|
||||
a.g.state = gDead
|
||||
a.px.g = 0
|
||||
case EvGoSched, EvGoPreempt:
|
||||
ret := checkRunning(a.px, a.g, ev, false)
|
||||
if ret&badG0 != 0 {
|
||||
// hopeless, we think. Don't know g
|
||||
a.delete(i, ev)
|
||||
}
|
||||
if ret&badRunning != 0 {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ret&badP != 0 {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
a.g.state = gRunnable
|
||||
a.g.evStart = nil
|
||||
a.px.g = 0
|
||||
a.g.ev = ev
|
||||
case EvGoUnblock:
|
||||
// g == 0 is ok here (PJW) and elsewhere?
|
||||
if a.g.state != gRunning {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ev.P != TimerP && a.px.g != ev.G {
|
||||
//a.prevs(i)
|
||||
a.err = fmt.Errorf("%v not running %d:%s",
|
||||
a.px, i, ev)
|
||||
continue
|
||||
}
|
||||
g1 := a.gs[ev.Args[0]]
|
||||
if g1.state != gWaiting {
|
||||
a.makewaiting(i, ev.Args[0], EvGoWaiting)
|
||||
g1.state = gWaiting
|
||||
}
|
||||
g1.state = gRunnable
|
||||
g1.ev = ev
|
||||
a.gs[ev.Args[0]] = g1
|
||||
// if p == TimerP, clean up from events.go:71
|
||||
ev.Args[2] = 0 // no point in checking p
|
||||
case EvGoSysCall:
|
||||
if ev.G == 0 {
|
||||
// hopeless; don't know how to repair
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
ret := checkRunning(a.px, a.g, ev, false)
|
||||
if ret&badRunning != 0 {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ret&badP != 0 {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
a.g.ev = ev
|
||||
case EvGoSysBlock:
|
||||
if ev.G == 0 {
|
||||
// hopeless to repair
|
||||
a.delete(i, ev)
continue
|
||||
}
|
||||
ret := checkRunning(a.px, a.g, ev, false)
|
||||
if ret&badRunning != 0 {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ret&badP != 0 {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
a.g.state = gWaiting
|
||||
a.g.evStart = nil
|
||||
a.px.g = 0
|
||||
case EvGoSysExit:
|
||||
if ev.G == 0 {
|
||||
// don't know how to repair
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
if a.g.state != gWaiting {
|
||||
a.makewaiting(i, ev.G, EvGoInSyscall)
|
||||
}
|
||||
a.g.state = gRunnable
|
||||
a.g.ev = ev
|
||||
case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
|
||||
EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond,
|
||||
EvGoBlockNet, EvGoBlockGC:
|
||||
if ev.G == 0 { // don't know how to repair
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
ret := checkRunning(a.px, a.g, ev, false)
|
||||
if ret&badRunning != 0 {
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
}
|
||||
if ret&badP != 0 {
|
||||
a.procstart(ev.P)
|
||||
}
|
||||
a.g.state = gWaiting
|
||||
a.g.ev = ev
|
||||
a.g.evStart = nil
|
||||
a.px.g = 0
|
||||
case EvHeapAlloc, EvGomaxprocs, EvNextGC, EvUserLog:
|
||||
a.makerunning(i, ev)
|
||||
a.g.state = gRunning
|
||||
a.px.g = ev.G
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("robust: unexpected %d:%s", i, ev)
|
||||
case EvUserTaskCreate:
|
||||
taskid := ev.Args[0]
|
||||
if _, ok := tasks[taskid]; ok {
|
||||
// task id conflict, kill this one, believe the earlier one
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
tasks[ev.Args[0]] = ev
|
||||
case EvUserTaskEnd: // nothing to do
|
||||
case EvUserRegion:
|
||||
mode := ev.Args[1]
|
||||
spans := activeSpans[ev.G]
|
||||
if mode == 0 {
|
||||
activeSpans[ev.G] = append(spans, ev)
|
||||
} else if mode == 1 { // span end
|
||||
n := len(spans)
|
||||
if n > 0 {
|
||||
// check that spans match up; clean up if not
|
||||
s := spans[n-1]
|
||||
if s.Args[0] != ev.Args[0] ||
|
||||
s.SArgs[0] != ev.SArgs[0] {
|
||||
// try to fix it
|
||||
var ok bool
|
||||
spans, ok = fixSpan(spans, ev)
|
||||
if !ok {
|
||||
// unfixed, toss this event
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
}
|
||||
n = len(spans)
|
||||
if n > 1 {
|
||||
activeSpans[ev.G] = spans[:n-1]
|
||||
} else {
|
||||
delete(activeSpans, ev.G)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// invalid mode, toss it
|
||||
a.delete(i, ev)
|
||||
continue
|
||||
}
|
||||
}
|
||||
a.gs[ev.G] = a.g
|
||||
a.ps[ev.P] = a.px
|
||||
a.evs = append(a.evs, ev)
|
||||
}
|
||||
ans := a.pref
|
||||
ans = append(ans, a.evs...)
|
||||
p.Preflen = len(a.pref)
|
||||
p.Added = len(a.inserted)
|
||||
p.Ignored = len(a.deleted)
|
||||
return ans, a, a.err
|
||||
}
|
||||
|
||||
func fixSpan(spans []*Event, ev *Event) ([]*Event, bool) {
|
||||
// probably indicates a corrupt trace file
|
||||
panic("implement")
|
||||
}
|
||||
|
||||
type same struct {
|
||||
ev *Event
|
||||
g gdesc
|
||||
p pdesc
|
||||
}
|
||||
|
||||
// This is a placeholder, to organize intervals with equal time stamps
|
||||
func (a *aux) reorder(n int, events []*Event) {
|
||||
// bunch of Events with equal time stamps
|
||||
// We care about GoCreate, GoWaiting, GoInSyscall,
|
||||
// GoStart (StartLocal, StartLabel), GoBlock*,
|
||||
// GoSched, GoPreempt, GoUnblock, GoSysExit,
|
||||
// (UnblockLocal, SysExitLocal), GCStart.
|
||||
// maybe ProcStart and ProcStop?
|
||||
repair := []same{}
|
||||
i := n
|
||||
for ; i < len(events) && events[i].Ts == events[n].Ts; i++ {
|
||||
ev := events[i]
|
||||
repair = append(repair, same{ev, a.gs[ev.G],
|
||||
a.ps[ev.P]})
|
||||
}
|
||||
a.last = i - 1
|
||||
log.Println("BEFORE:")
|
||||
for i, r := range repair {
|
||||
log.Printf("x%d:%s %v %v", i+n, r.ev, r.g, r.p)
|
||||
}
|
||||
if true { // PJW
|
||||
return // we're not doing anything yet
|
||||
}
|
||||
// sorting is not going to be enough.
|
||||
log.Println("DID NOTHING!")
|
||||
log.Println("after")
|
||||
for i, r := range repair {
|
||||
log.Printf("y%d:%s %v %v", i+n, r.ev, r.g, r.p)
|
||||
}
|
||||
for i, r := range repair {
|
||||
events[n+i] = r.ev
|
||||
}
|
||||
}
|
||||
|
||||
// printing for debugging
|
||||
func (a *aux) prevs(n int) {
|
||||
for i := 0; i < len(a.pref); i++ {
|
||||
log.Printf("p%3d %s", i, a.pref[i])
|
||||
}
|
||||
start := 0
|
||||
if n > 50 {
|
||||
start = n - 50
|
||||
}
|
||||
for i := start; i <= n+1 && i < len(a.input); i++ {
|
||||
log.Printf("%4d %s", i, a.input[i])
|
||||
}
|
||||
}
|
Binary files not shown (18 deleted binary test files).
@ -1,498 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package traceparser parses the trace files produced by runtime.StartTrace
|
||||
package traceparser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/traceparser/filebuf"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parsed is the result of parsing a trace file
|
||||
type Parsed struct {
|
||||
// Set by New
|
||||
Name string // File's name
|
||||
Size int64 // File's size
|
||||
Count int64 // approximate number of all events
|
||||
MaxTs int64 // range of all events, in nanoseconds
|
||||
Strings map[uint64]string
|
||||
Stacks map[uint32][]*Frame
|
||||
Version int // version of the trace file from header
|
||||
TicksPerSec int64 // from EvFrequency in trailer
|
||||
minticks, maxticks int64 // from Init
|
||||
r filebuf.Buf // implements io.Seek and io.Read
|
||||
batches []batch // location of each Batch and time of start
|
||||
timerGoids map[uint64]bool
|
||||
// the following are per Parse
|
||||
MinWant, MaxWant int64 // range wanted, from the arguments to Parse()
|
||||
Err error // set by internal functions to stop further processing
|
||||
Events []*Event // after Parse, the events from MinWant to MaxWant
|
||||
Preflen int // how long a prefix we added
|
||||
Ignored int // how many events we elided
|
||||
Added int // how many events we added, not including the prefix
|
||||
// internal processing variables
|
||||
seenArgs map[uint64]*[]uint64
|
||||
byproc map[int][]*Event
|
||||
lastGs map[int]uint64
|
||||
lastG uint64
|
||||
lastP int
|
||||
lastTs int64
|
||||
}
|
||||
|
||||
func (p *Parsed) String() string {
|
||||
ans := []string{}
|
||||
ans = append(ans, fmt.Sprintf("%s Sz:%d Count:%d MaxTs:%d #strs:%d #stks:%d",
|
||||
p.Name, p.Size, p.Count, p.MaxTs, len(p.Strings), len(p.Stacks)))
|
||||
ans = append(ans, fmt.Sprintf("%d clock:%d ticks:(%d,%d) #b:%d",
|
||||
p.Version, p.TicksPerSec, p.minticks, p.maxticks, len(p.batches)))
|
||||
return strings.Join(ans, "\n\t")
|
||||
}
|
||||
|
||||
// clean up after previous call to Parse
|
||||
func (p *Parsed) clean() {
|
||||
// some of these are redundant
|
||||
p.Err = nil
|
||||
p.Events = nil
|
||||
p.Preflen = 0
|
||||
p.Ignored = 0
|
||||
p.Added = 0
|
||||
p.seenArgs = nil // redundant, but safe
|
||||
p.byproc = nil
|
||||
p.lastGs = nil
|
||||
p.lastG = 0
|
||||
p.lastTs = 0
|
||||
}
|
||||
|
||||
// Frame is a single frame in a stack trace
|
||||
type Frame struct {
|
||||
PC uint64
|
||||
Fn string
|
||||
File string
|
||||
Line int
|
||||
}
|
||||
|
||||
// An Event is the parsed form of a single trace event
|
||||
type Event struct {
|
||||
Type byte
|
||||
P int32
|
||||
Ts int64
|
||||
G uint64
|
||||
StkID uint32 // key to Parsed.Stacks
|
||||
Args [3]uint64
|
||||
SArgs []string // string arguments: EvUserLog has 2, others have 1 or none
|
||||
Link *Event
|
||||
}
|
||||
|
||||
// Batch remembers the EvBatch events. PJW: keep an index of User events?
|
||||
type batch struct {
|
||||
Off int
|
||||
P int64
|
||||
Cycles int64 // as read from EvBatch
|
||||
Nano int64 // start time of batch, set in commonInit()
|
||||
raws []rawEvent // filled in during Parse() for those batches that overlap the desired interval
|
||||
}
|
||||
|
||||
// rawEvent is a raw event parsed from batches that overlap the time interval
|
||||
type rawEvent struct { // about 75 bytes
|
||||
// the choice of what to share (args) and what to make unique per rawEvent
|
||||
// (arg0, sarg) was done by measuring the space impact of various choices.
|
||||
off uint32 // offset in batch (at batch.Off + off in file)
|
||||
typ byte
|
||||
arg0 uint64
|
||||
args *[]uint64 // remainder of the args (frequently nil), shared
|
||||
sarg string
|
||||
}
|
||||
|
||||
func (r rawEvent) String() string {
|
||||
if r.args != nil && len(*r.args) > 0 {
|
||||
return fmt.Sprintf("[%s %d %v %s]", evname(r.typ), r.arg0, *r.args, r.sarg)
|
||||
}
|
||||
return fmt.Sprintf("[%s, %d, [], %s]", evname(r.typ), r.arg0, r.sarg)
|
||||
}
|
||||
|
||||
// New scans the trace file, finding the number of events, the earliest and latest
|
||||
// timestamps, and the stacks and strings referenced in the file.
|
||||
func New(fname string) (*Parsed, error) {
|
||||
fd, err := filebuf.New(fname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return commonInit(fd, fname)
|
||||
}
|
||||
|
||||
// ParseError may be returned by New() or ParseBuffer() to make available
|
||||
// some information in the case that the raw trace file seems to contain
|
||||
// negative time stamps. (In P, the Name, Size, Count, Strings, Stacks, and Version fields are valid,
|
||||
// and MaxTs or TicksPerSec is negative.)
|
||||
type ParseError struct {
|
||||
P *Parsed
|
||||
Err error
|
||||
}
|
||||
|
||||
func (pe ParseError) Error() string {
|
||||
return pe.Err.Error()
|
||||
}
|
||||
|
||||
func commonInit(fd filebuf.Buf, fname string) (*Parsed, error) {
|
||||
ans := &Parsed{Name: fname, minticks: 1 << 62} // minticks can only decrease
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
fd.Close() // try to clean up after error
|
||||
}
|
||||
}()
|
||||
ans.Size = fd.Size()
|
||||
ans.r = fd
|
||||
// parseRaw here for header, trailer: clock, stacks, strings,
|
||||
if err = ans.parseHeader(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = ans.scanFile(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// done with seenArgs
|
||||
ans.seenArgs = nil
|
||||
// convert the ticks in batches to nanoseconds
|
||||
ans.toNanoseconds()
|
||||
if ans.MaxTs <= 0 || ans.TicksPerSec <= 0 {
|
||||
err := ParseError{
|
||||
P: ans,
|
||||
Err: fmt.Errorf("corrupt trace file: negative time (max TS=%d, ticks per sec=%d)",
|
||||
ans.MaxTs, ans.TicksPerSec),
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
// Parse parses the events in the interval: start <= ts <= start+length.
|
||||
// f, if not nil, will be called at various stages of the parse, each identified by the string
|
||||
// argument. It could report on elapsed time, or memory usage, or whatever the user wants.
|
||||
// The number of times it is called and the contents of the string argument are both
|
||||
// changeable details of the implementation. Parse is not safe for concurrent use.
|
||||
func (p *Parsed) Parse(start, length int64, f func(string)) error {
|
||||
p.clean()
|
||||
if f == nil {
|
||||
f = func(string) {} // avoid any further testing for nil
|
||||
}
|
||||
|
||||
p.MinWant = start
|
||||
p.MaxWant = start + length
|
||||
// arrange the slice of batches by P
|
||||
byp := map[int64][]*batch{}
|
||||
// PJW: keep track of the order the Ps occur and use that for batchify
|
||||
for i, b := range p.batches {
|
||||
byp[b.P] = append(byp[b.P], &p.batches[i])
|
||||
p.batches[i].raws = nil // reset from last call to Parse
|
||||
}
|
||||
// batchify the ones that overlap the time range
|
||||
for _, v := range byp {
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
var bnext *batch
|
||||
if i < len(v)-1 {
|
||||
bnext = v[i+1]
|
||||
}
|
||||
if b.Nano >= p.MaxWant {
|
||||
// starts too late
|
||||
continue
|
||||
} else if b.Nano <= p.MinWant && (bnext != nil && bnext.Nano <= p.MinWant) {
|
||||
// entirely too early
|
||||
continue
|
||||
}
|
||||
err := p.batchify(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
f("batchify done")
|
||||
return p.createEvents(f)
|
||||
}
|
||||
|
||||
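// Editor's sketch: how the API declared in this file (New, Parse, Events,
// MaxTs, Close) fits together. "trace.out" is a placeholder path, the 1ms
// window is arbitrary, and since traceparser is an internal package this
// only compiles from inside the standard library tree.
package main

import (
    "fmt"
    "log"

    "internal/traceparser"
)

func main() {
    p, err := traceparser.New("trace.out") // placeholder file name
    if err != nil {
        log.Fatal(err)
    }
    defer p.Close()

    // Parse only the first millisecond; the progress callback may be nil.
    if err := p.Parse(0, 1000000, nil); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("parsed %d events; whole trace spans %d ns\n", len(p.Events), p.MaxTs)
}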
// ParseBuffer treats its argument as a trace file, and returns the
|
||||
// result of parsing it
|
||||
func ParseBuffer(rd io.Reader) (*Parsed, error) {
|
||||
pr, err := filebuf.FromReader(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p, err := commonInit(pr, "<buf>")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// need the version and the initial scan
|
||||
err = p.Parse(0, 1<<62, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// called from commonInit to compute the nanosecond start time of each batch
|
||||
func (p *Parsed) toNanoseconds() {
|
||||
minCycles := p.minticks
|
||||
freq := 1e9 / float64(p.TicksPerSec)
|
||||
// Batches, and more to come. Don't call this twice!
|
||||
for i, ex := range p.batches {
|
||||
p.batches[i].Nano = int64(float64(ex.Cycles-minCycles) * freq)
|
||||
}
|
||||
p.MaxTs = int64(float64(p.maxticks-minCycles) * freq)
|
||||
}
|
||||
|
||||
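// Editor's note: the conversion above is just (Cycles - minticks) * 1e9 /
// TicksPerSec. A worked example with made-up numbers (the 2.5 GHz tick rate
// is an assumption, not a value taken from any real trace):
package main

import "fmt"

func main() {
    const ticksPerSec = int64(2500000000) // hypothetical tick rate
    minTicks := int64(1000000)
    cycles := minTicks + 1250000000 // half a second's worth of ticks later
    freq := 1e9 / float64(ticksPerSec)
    nano := int64(float64(cycles-minTicks) * freq)
    fmt.Println(nano) // 500000000, i.e. 0.5s after the start of the trace
}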
// argsAt returns the args of an event in the file and the offset for the next event.
|
||||
//
|
||||
// For EvString it returns off, nil, nil, and
|
||||
// for EvUserLog it ignores the string argument, which must be read by the
|
||||
// caller.
|
||||
func (p *Parsed) argsAt(off int, check byte) (int, []uint64, error) {
|
||||
off0 := off
|
||||
r := p.r
|
||||
loc, err := r.Seek(int64(off), 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var buf [1]byte
|
||||
n, err := r.Read(buf[:])
|
||||
if err != nil || n != 1 {
|
||||
return 0, nil, fmt.Errorf("read failed at 0x%x, %d %v, loc=%d",
|
||||
off, n, err, loc)
|
||||
}
|
||||
off += n
|
||||
typ := buf[0] << 2 >> 2
|
||||
narg := buf[0]>>6 + 1
|
||||
inlineArgs := byte(4)
|
||||
|
||||
if typ == EvNone || typ >= EvCount ||
|
||||
EventDescriptions[typ].MinVersion > p.Version {
|
||||
return 0, nil, fmt.Errorf("unk type %v at offset 0x%x", typ, off0)
|
||||
}
|
||||
if typ == EvString { // skip, without error checking
|
||||
_, off, err = readVal(r, off)
|
||||
var ln uint64
|
||||
ln, off, err = readVal(r, off)
|
||||
off += int(ln)
|
||||
return off, nil, nil
|
||||
}
|
||||
args := []uint64{}
|
||||
if narg < inlineArgs {
|
||||
for i := 0; i < int(narg); i++ {
|
||||
var v uint64
|
||||
v, off, err = readVal(r, off)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
|
||||
return 0, nil, err
|
||||
}
|
||||
args = append(args, v)
|
||||
}
|
||||
} else {
|
||||
// More than inlineArgs args, the first value is length of the event in bytes.
|
||||
var v uint64
|
||||
v, off, err = readVal(r, off)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
|
||||
return 0, nil, err
|
||||
}
|
||||
evLen := v
|
||||
off1 := off
|
||||
for evLen > uint64(off-off1) {
|
||||
v, off, err = readVal(r, off)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to read event %v argument at offset %v (%v)", typ, off, err)
|
||||
return 0, nil, err
|
||||
}
|
||||
args = append(args, v)
|
||||
}
|
||||
if evLen != uint64(off-off1) {
|
||||
err = fmt.Errorf("event has wrong length at offset 0x%x: want %v, got %v", off0, evLen, off-off1)
|
||||
return 0, nil, err
|
||||
}
|
||||
}
|
||||
// This routine does not read the string argument. Callers must treat EvUserLog specially.
|
||||
return off, args, nil
|
||||
}
|
||||
|
||||
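// Editor's note: argsAt relies on the trace's one-byte event header. The low
// six bits hold the event type and the top two bits hold the inline argument
// count minus one, with the value 3 meaning "four or more arguments,
// length-prefixed". A decoding sketch with a hypothetical header byte:
package main

import "fmt"

func main() {
    hdr := byte(13 | 2<<6) // hypothetical header: event type 13, three inline args
    typ := hdr << 2 >> 2   // clear the top two bits
    narg := hdr>>6 + 1
    fmt.Printf("typ=%d narg=%d\n", typ, narg) // typ=13 narg=3
}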
// read a string from r
|
||||
func readStr(r io.Reader, off0 int) (s string, off int, err error) {
|
||||
var sz uint64
|
||||
sz, off, err = readVal(r, off0)
|
||||
if err != nil || sz == 0 {
|
||||
return "", off, err
|
||||
}
|
||||
if sz > 1e6 {
|
||||
return "", off, fmt.Errorf("string at offset %d is too large (len=%d)", off, sz)
|
||||
}
|
||||
buf := make([]byte, sz)
|
||||
n, err := io.ReadFull(r, buf)
|
||||
if err != nil || sz != uint64(n) {
|
||||
return "", off + n, fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, sz, err)
|
||||
}
|
||||
return string(buf), off + n, nil
|
||||
}
|
||||
|
||||
// readVal reads unsigned base-128 value from r.
|
||||
func readVal(r io.Reader, off0 int) (v uint64, off int, err error) {
|
||||
off = off0
|
||||
for i := 0; i < 10; i++ {
|
||||
var buf [1]byte
|
||||
var n int
|
||||
n, err = r.Read(buf[:])
|
||||
if err != nil || n != 1 {
|
||||
return 0, 0, fmt.Errorf("failed to read trace at offset %d: read %v, error %v", off0, n, err)
|
||||
}
|
||||
off++
|
||||
v |= uint64(buf[0]&0x7f) << (uint(i) * 7)
|
||||
if buf[0]&0x80 == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
return 0, 0, fmt.Errorf("bad value at offset 0x%x", off0)
|
||||
}
|
||||
|
||||
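// Editor's note: readVal decodes the same unsigned little-endian base-128
// ("varint") encoding that encoding/binary exposes as Uvarint, so values are
// easy to sanity-check by hand:
package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    buf := make([]byte, binary.MaxVarintLen64)
    n := binary.PutUvarint(buf, 300) // encodes as 0xAC 0x02
    v, m := binary.Uvarint(buf[:n])
    fmt.Println(n, v, m) // 2 300 2
}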
// OSStats reports on the underlying i/o. If p was created by New,
|
||||
// the fields report filesystem activity. If p was created by ParseBuffer,
|
||||
// only Size is set.
|
||||
func (p *Parsed) OSStats() filebuf.Stat {
|
||||
return p.r.Stats()
|
||||
}
|
||||
|
||||
func (ev *Event) String() string {
|
||||
var tslink int64
|
||||
if ev.Link != nil {
|
||||
tslink = ev.Link.Ts
|
||||
}
|
||||
return fmt.Sprintf("[g:%d p:%d %s/%d %v %v %x ->%x]",
|
||||
ev.G, ev.P, evname(ev.Type), ev.Type,
|
||||
ev.Args, ev.SArgs, ev.Ts, tslink)
|
||||
|
||||
}
|
||||
|
||||
func evname(t byte) string {
|
||||
if t >= EvCount {
|
||||
return fmt.Sprintf("typ%d?", t)
|
||||
}
|
||||
return EventDescriptions[t].Name
|
||||
}
|
||||
|
||||
// Close closes the underlying file.
|
||||
func (p *Parsed) Close() error {
|
||||
return p.r.Close()
|
||||
}
|
||||
|
||||
// Event types in the trace.
|
||||
// Verbatim copy from src/runtime/trace.go with the "trace" prefix removed.
|
||||
const (
|
||||
EvNone = 0 // unused
|
||||
EvBatch = 1 // start of per-P batch of events [pid, timestamp]
|
||||
EvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
|
||||
EvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
|
||||
EvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
|
||||
EvProcStart = 5 // start of P [timestamp, thread id]
|
||||
EvProcStop = 6 // stop of P [timestamp]
|
||||
EvGCStart = 7 // GC start [timestamp, seq, stack id]
|
||||
EvGCDone = 8 // GC done [timestamp]
|
||||
EvGCSTWStart = 9 // GC mark termination start [timestamp, kind]
|
||||
EvGCSTWDone = 10 // GC mark termination done [timestamp]
|
||||
EvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
|
||||
EvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
|
||||
EvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
|
||||
EvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
|
||||
EvGoEnd = 15 // goroutine ends [timestamp]
|
||||
EvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
|
||||
EvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
|
||||
EvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
|
||||
EvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
|
||||
EvGoBlock = 20 // goroutine blocks [timestamp, stack]
|
||||
EvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
|
||||
EvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
|
||||
EvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
|
||||
EvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
|
||||
EvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
|
||||
EvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
|
||||
EvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
|
||||
EvGoSysCall = 28 // syscall enter [timestamp, stack]
|
||||
EvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
|
||||
EvGoSysBlock = 30 // syscall blocks [timestamp]
|
||||
EvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
|
||||
EvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
|
||||
EvHeapAlloc = 33 // memstats.heap_live change [timestamp, heap_alloc]
|
||||
EvNextGC = 34 // memstats.next_gc change [timestamp, next_gc]
|
||||
EvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
|
||||
EvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
|
||||
EvString = 37 // string dictionary entry [ID, length, string]
|
||||
EvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
|
||||
EvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
|
||||
EvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
|
||||
EvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
|
||||
EvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
|
||||
EvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
|
||||
EvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
|
||||
EvUserTaskCreate = 45 // trace.NewContext [timestamp, internal task id, internal parent id, stack, name string]
|
||||
EvUserTaskEnd = 46 // end of task [timestamp, internal task id, stack]
|
||||
EvUserRegion = 47 // trace.WithSpan [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
|
||||
EvUserLog = 48 // trace.Log [timestamp, internal id, key string id, stack, value string]
|
||||
EvCount = 49
|
||||
)
|
||||
|
||||
// EventDescriptions describe the Events
|
||||
var EventDescriptions = [EvCount]struct {
|
||||
Name string
|
||||
MinVersion int
|
||||
Stack bool
|
||||
Args []string
|
||||
SArgs []string // string arguments
|
||||
}{
|
||||
EvNone: {"None", 1005, false, []string{}, nil},
|
||||
EvBatch: {"Batch", 1005, false, []string{"p", "ticks"}, nil}, // in 1.5 format it was {"p", "seq", "ticks"}
|
||||
EvFrequency: {"Frequency", 1005, false, []string{"freq"}, nil}, // in 1.5 format it was {"freq", "unused"}
|
||||
EvStack: {"Stack", 1005, false, []string{"id", "siz"}, nil},
|
||||
EvGomaxprocs: {"Gomaxprocs", 1005, true, []string{"procs"}, nil},
|
||||
EvProcStart: {"ProcStart", 1005, false, []string{"thread"}, nil},
|
||||
EvProcStop: {"ProcStop", 1005, false, []string{}, nil},
|
||||
EvGCStart: {"GCStart", 1005, true, []string{"seq"}, nil}, // in 1.5 format it was {}
|
||||
EvGCDone: {"GCDone", 1005, false, []string{}, nil},
|
||||
EvGCSTWStart: {"GCSTWStart", 1005, false, []string{"kindid"}, []string{"kind"}}, // <= 1.9, args was {} (implicitly {0})
|
||||
EvGCSTWDone: {"GCSTWDone", 1005, false, []string{}, nil},
|
||||
EvGCSweepStart: {"GCSweepStart", 1005, true, []string{}, nil},
|
||||
EvGCSweepDone: {"GCSweepDone", 1005, false, []string{"swept", "reclaimed"}, nil}, // before 1.9, format was {}
|
||||
EvGoCreate: {"GoCreate", 1005, true, []string{"g", "stack"}, nil},
|
||||
EvGoStart: {"GoStart", 1005, false, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"}
|
||||
EvGoEnd: {"GoEnd", 1005, false, []string{}, nil},
|
||||
EvGoStop: {"GoStop", 1005, true, []string{}, nil},
|
||||
EvGoSched: {"GoSched", 1005, true, []string{}, nil},
|
||||
EvGoPreempt: {"GoPreempt", 1005, true, []string{}, nil},
|
||||
EvGoSleep: {"GoSleep", 1005, true, []string{}, nil},
|
||||
EvGoBlock: {"GoBlock", 1005, true, []string{}, nil},
|
||||
EvGoUnblock: {"GoUnblock", 1005, true, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"}
|
||||
EvGoBlockSend: {"GoBlockSend", 1005, true, []string{}, nil},
|
||||
EvGoBlockRecv: {"GoBlockRecv", 1005, true, []string{}, nil},
|
||||
EvGoBlockSelect: {"GoBlockSelect", 1005, true, []string{}, nil},
|
||||
EvGoBlockSync: {"GoBlockSync", 1005, true, []string{}, nil},
|
||||
EvGoBlockCond: {"GoBlockCond", 1005, true, []string{}, nil},
|
||||
EvGoBlockNet: {"GoBlockNet", 1005, true, []string{}, nil},
|
||||
EvGoSysCall: {"GoSysCall", 1005, true, []string{}, nil},
|
||||
EvGoSysExit: {"GoSysExit", 1005, false, []string{"g", "seq", "ts"}, nil},
|
||||
EvGoSysBlock: {"GoSysBlock", 1005, false, []string{}, nil},
|
||||
EvGoWaiting: {"GoWaiting", 1005, false, []string{"g"}, nil},
|
||||
EvGoInSyscall: {"GoInSyscall", 1005, false, []string{"g"}, nil},
|
||||
EvHeapAlloc: {"HeapAlloc", 1005, false, []string{"mem"}, nil},
|
||||
EvNextGC: {"NextGC", 1005, false, []string{"mem"}, nil},
|
||||
EvTimerGoroutine: {"TimerGoroutine", 1005, false, []string{"g"}, nil}, // in 1.5 format it was {"g", "unused"}
|
||||
EvFutileWakeup: {"FutileWakeup", 1005, false, []string{}, nil},
|
||||
EvString: {"String", 1007, false, []string{}, nil},
|
||||
EvGoStartLocal: {"GoStartLocal", 1007, false, []string{"g"}, nil},
|
||||
EvGoUnblockLocal: {"GoUnblockLocal", 1007, true, []string{"g"}, nil},
|
||||
EvGoSysExitLocal: {"GoSysExitLocal", 1007, false, []string{"g", "ts"}, nil},
|
||||
EvGoStartLabel: {"GoStartLabel", 1008, false, []string{"g", "seq", "labelid"}, []string{"label"}},
|
||||
EvGoBlockGC: {"GoBlockGC", 1008, true, []string{}, nil},
|
||||
EvGCMarkAssistStart: {"GCMarkAssistStart", 1009, true, []string{}, nil},
|
||||
EvGCMarkAssistDone: {"GCMarkAssistDone", 1009, false, []string{}, nil},
|
||||
EvUserTaskCreate: {"UserTaskCreate", 1011, true, []string{"taskid", "pid", "typeid"}, []string{"name"}},
|
||||
EvUserTaskEnd: {"UserTaskEnd", 1011, true, []string{"taskid"}, nil},
|
||||
EvUserRegion: {"UserRegion", 1011, true, []string{"taskid", "mode", "typeid"}, []string{"name"}},
|
||||
EvUserLog: {"UserLog", 1011, true, []string{"id", "keyid"}, []string{"category", "message"}},
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package traceparser
|
||||
|
||||
// test routines used by tests in the trace commands
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Writer is a test trace writer.
|
||||
type Writer struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
// NewWriter returns a test Writer whose buffer already contains the trace file header.
|
||||
func NewWriter() *Writer {
|
||||
w := new(Writer)
|
||||
w.Write([]byte("go 1.9 trace\x00\x00\x00\x00"))
|
||||
return w
|
||||
}
|
||||
|
||||
// Emit writes an event record to the trace.
|
||||
// See Event types for valid types and required arguments.
|
||||
func (w *Writer) Emit(typ byte, args ...uint64) {
|
||||
nargs := byte(len(args)) - 1
|
||||
if nargs > 3 {
|
||||
nargs = 3
|
||||
}
|
||||
buf := []byte{typ | nargs<<6}
|
||||
if nargs == 3 {
|
||||
buf = append(buf, 0)
|
||||
}
|
||||
for _, a := range args {
|
||||
buf = appendVarint(buf, a)
|
||||
}
|
||||
if nargs == 3 {
|
||||
buf[1] = byte(len(buf) - 2)
|
||||
}
|
||||
n, err := w.Write(buf)
|
||||
if n != len(buf) || err != nil {
|
||||
panic("failed to write")
|
||||
}
|
||||
}
|
||||
|
||||
func appendVarint(buf []byte, v uint64) []byte {
|
||||
for ; v >= 0x80; v >>= 7 {
|
||||
buf = append(buf, 0x80|byte(v))
|
||||
}
|
||||
buf = append(buf, byte(v))
|
||||
return buf
|
||||
}
|
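// Editor's sketch: wiring the test Writer above to ParseBuffer. The two Emit
// calls produce only a batch header and a frequency record, which is not a
// complete trace, so the parse is expected to fail; the point is simply how
// the pieces fit together. The tick frequency 15625000 is an arbitrary example
// value, and internal/traceparser is importable only inside the standard
// library tree.
package main

import (
    "fmt"

    "internal/traceparser"
)

func main() {
    w := traceparser.NewWriter()
    w.Emit(traceparser.EvBatch, 0, 0)         // [pid, ticks]
    w.Emit(traceparser.EvFrequency, 15625000) // ticks per second
    p, err := traceparser.ParseBuffer(w)
    fmt.Println(p, err) // expect an error for such a minimal stream
}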