1
0
mirror of https://github.com/golang/go synced 2024-09-25 03:20:13 -06:00

[dev.cc] all: merge master (48469a2) into dev.cc

Change-Id: I10f7950d173b302151f2a31daebce297b4306ebe
This commit is contained in:
Russ Cox 2015-02-23 10:15:35 -05:00
commit de50bad121
35 changed files with 4980 additions and 957 deletions

6
misc/trace/README.md Normal file
View File

@ -0,0 +1,6 @@
This directory contains a helper file for the trace viewer (go tool trace).
trace_viewer_lean.html was generated following instructions in:
https://github.com/google/trace-viewer/wiki/Embedding
on revision 895aa74558d19d91906fb720df6458244ef160c6 using:
trace-viewer$ ./vulcanize_trace_viewer --config=lean

File diff suppressed because one or more lines are too long

View File

@ -411,6 +411,7 @@ var goTools = map[string]targetDir{
"cmd/objwriter": toTool,
"cmd/pack": toTool,
"cmd/pprof": toTool,
"cmd/trace": toTool,
"cmd/yacc": toTool,
"golang.org/x/tools/cmd/cover": toTool,
"golang.org/x/tools/cmd/godoc": toBin,

View File

@ -346,4 +346,4 @@ void ldmain(int, char**);
#pragma varargck argpos diag 1
#define SYMDEF "__.GOSYMDEF"
#define SYMDEF "__.GOSYMDEF"

328
src/cmd/trace/goroutines.go Normal file
View File

@ -0,0 +1,328 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Goroutine-related profiles.
package main
import (
"fmt"
"html/template"
"internal/trace"
"net/http"
"sort"
"strconv"
)
// init registers the HTTP handlers for the goroutine-analysis pages.
func init() {
	http.HandleFunc("/goroutines", httpGoroutines)
	http.HandleFunc("/goroutine", httpGoroutine)
}
// gtype describes a group of goroutines grouped by start PC.
type gtype struct {
	ID       uint64 // Unique identifier (PC).
	Name     string // Start function.
	N        int    // Total number of goroutines in this group.
	ExecTime int64  // Total execution time of all goroutines in this group.
}
// gtypeList implements sort.Interface for goroutine groups,
// ordering them by descending total execution time.
type gtypeList []gtype

func (l gtypeList) Len() int { return len(l) }

func (l gtypeList) Less(i, j int) bool { return l[j].ExecTime < l[i].ExecTime }

func (l gtypeList) Swap(i, j int) { l[j], l[i] = l[i], l[j] }
// gdesc describes a single goroutine.
// Exported fields are rendered by the HTML templates; the unexported
// block*Time fields are transient timestamps used by analyzeGoroutines
// to track when the goroutine last entered each waiting state.
type gdesc struct {
	ID            uint64
	Name          string
	PC            uint64
	CreateTime    int64
	StartTime     int64
	EndTime       int64
	LastStart     int64
	ExecTime      int64
	SchedWaitTime int64
	IOTime        int64
	BlockTime     int64
	SyscallTime   int64
	GCTime        int64
	SweepTime     int64
	TotalTime     int64

	blockNetTime     int64
	blockSyncTime    int64
	blockSyscallTime int64
	blockSweepTime   int64
	blockGCTime      int64
	blockSchedTime   int64
}
// gdescList implements sort.Interface for goroutine descriptions,
// ordering them by descending total lifetime.
type gdescList []*gdesc

func (l gdescList) Len() int { return len(l) }

func (l gdescList) Less(i, j int) bool { return l[j].TotalTime < l[i].TotalTime }

func (l gdescList) Swap(i, j int) { l[j], l[i] = l[i], l[j] }
// gs maps goroutine ID to its collected description.
// It is populated lazily by analyzeGoroutines and read by the HTTP handlers.
var gs = make(map[uint64]*gdesc)
// analyzeGoroutines generates list gdesc's from the trace and stores it in gs.
// It makes a single pass over events, recording in the block*Time fields when
// a goroutine entered each waiting state and accumulating the corresponding
// public duration when the state is left.
func analyzeGoroutines(events []*trace.Event) {
	if len(gs) > 0 { //!!! racy
		return
	}
	var lastTs int64
	var gcStartTime int64
	for _, ev := range events {
		lastTs = ev.Ts
		switch ev.Type {
		case trace.EvGoCreate:
			g := &gdesc{CreateTime: ev.Ts}
			// A newly created goroutine waits for the scheduler from the start.
			g.blockSchedTime = ev.Ts
			gs[ev.Args[0]] = g
		case trace.EvGoStart:
			g := gs[ev.G]
			if g.PC == 0 {
				// First start: remember the start function.
				g.PC = ev.Stk[0].PC
				g.Name = ev.Stk[0].Fn
			}
			g.LastStart = ev.Ts
			if g.StartTime == 0 {
				g.StartTime = ev.Ts
			}
			if g.blockSchedTime != 0 {
				g.SchedWaitTime += ev.Ts - g.blockSchedTime
				g.blockSchedTime = 0
			}
		case trace.EvGoEnd, trace.EvGoStop:
			g := gs[ev.G]
			g.ExecTime += ev.Ts - g.LastStart
			g.TotalTime = ev.Ts - g.CreateTime
			g.EndTime = ev.Ts
		case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect,
			trace.EvGoBlockSync, trace.EvGoBlockCond:
			g := gs[ev.G]
			g.ExecTime += ev.Ts - g.LastStart
			g.blockSyncTime = ev.Ts
		case trace.EvGoSched, trace.EvGoPreempt:
			g := gs[ev.G]
			g.ExecTime += ev.Ts - g.LastStart
			g.blockSchedTime = ev.Ts
		case trace.EvGoSleep, trace.EvGoBlock:
			g := gs[ev.G]
			g.ExecTime += ev.Ts - g.LastStart
		case trace.EvGoBlockNet:
			g := gs[ev.G]
			g.ExecTime += ev.Ts - g.LastStart
			g.blockNetTime = ev.Ts
		case trace.EvGoUnblock:
			// Args[0] is the goroutine being unblocked, not ev.G.
			g := gs[ev.Args[0]]
			if g.blockNetTime != 0 {
				g.IOTime += ev.Ts - g.blockNetTime
				g.blockNetTime = 0
			}
			if g.blockSyncTime != 0 {
				g.BlockTime += ev.Ts - g.blockSyncTime
				g.blockSyncTime = 0
			}
			// The goroutine is now runnable and waits for the scheduler.
			g.blockSchedTime = ev.Ts
		case trace.EvGoSysBlock:
			g := gs[ev.G]
			g.ExecTime += ev.Ts - g.LastStart
			g.blockSyscallTime = ev.Ts
		case trace.EvGoSysExit:
			g := gs[ev.G]
			if g.blockSyscallTime != 0 {
				g.SyscallTime += ev.Ts - g.blockSyscallTime
				g.blockSyscallTime = 0
			}
			g.blockSchedTime = ev.Ts
		case trace.EvGCSweepStart:
			g := gs[ev.G]
			if g != nil {
				// Sweep can happen during GC on system goroutine.
				g.blockSweepTime = ev.Ts
			}
		case trace.EvGCSweepDone:
			g := gs[ev.G]
			if g != nil && g.blockSweepTime != 0 {
				g.SweepTime += ev.Ts - g.blockSweepTime
				g.blockSweepTime = 0
			}
		case trace.EvGCStart:
			gcStartTime = ev.Ts
		case trace.EvGCDone:
			// Charge the GC interval to every goroutine still alive.
			for _, g := range gs {
				if g.EndTime == 0 {
					g.GCTime += ev.Ts - gcStartTime
				}
			}
		}
	}
	// Close out intervals still open when the trace ended.
	for _, g := range gs {
		if g.TotalTime == 0 {
			g.TotalTime = lastTs - g.CreateTime
		}
		if g.EndTime == 0 {
			g.EndTime = lastTs
		}
		if g.blockNetTime != 0 {
			g.IOTime += lastTs - g.blockNetTime
			g.blockNetTime = 0
		}
		if g.blockSyncTime != 0 {
			g.BlockTime += lastTs - g.blockSyncTime
			g.blockSyncTime = 0
		}
		if g.blockSyscallTime != 0 {
			g.SyscallTime += lastTs - g.blockSyscallTime
			g.blockSyscallTime = 0
		}
		if g.blockSchedTime != 0 {
			g.SchedWaitTime += lastTs - g.blockSchedTime
			g.blockSchedTime = 0
		}
	}
}
// httpGoroutines serves list of goroutine groups.
// Goroutines are aggregated by start PC and rendered sorted by
// total execution time (see gtypeList.Less).
func httpGoroutines(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	analyzeGoroutines(events)
	gss := make(map[uint64]gtype) // start PC -> aggregated group
	for _, g := range gs {
		gs1 := gss[g.PC]
		gs1.ID = g.PC
		gs1.Name = g.Name
		gs1.N++
		gs1.ExecTime += g.ExecTime
		gss[g.PC] = gs1
	}
	var glist gtypeList
	for k, v := range gss {
		v.ID = k
		glist = append(glist, v)
	}
	sort.Sort(glist)
	templGoroutines.Execute(w, glist)
}
// templGoroutines renders the goroutine-group list served by httpGoroutines.
var templGoroutines = template.Must(template.New("").Parse(`
<html>
<body>
Goroutines: <br>
{{range $}}
<a href="/goroutine?id={{.ID}}">{{.Name}}</a> N={{.N}} <br>
{{end}}
</body>
</html>
`))
// httpGoroutine serves list of goroutines in a particular group.
// The group is identified by the "id" query parameter (a start PC);
// goroutines with zero execution time are omitted.
func httpGoroutine(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	pc, err := strconv.ParseUint(r.FormValue("id"), 10, 64)
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to parse id parameter '%v': %v", r.FormValue("id"), err), http.StatusInternalServerError)
		return
	}
	analyzeGoroutines(events)
	var glist gdescList
	for gid, g := range gs {
		if g.PC != pc || g.ExecTime == 0 {
			continue
		}
		g.ID = gid
		glist = append(glist, g)
	}
	sort.Sort(glist)
	err = templGoroutine.Execute(w, glist)
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError)
		return
	}
}
// templGoroutine renders the per-group goroutine table served by httpGoroutine.
var templGoroutine = template.Must(template.New("").Parse(`
<html>
<body>
<table border="1" sortable="1">
<tr>
<th> Goroutine </th>
<th> Total time, ns </th>
<th> Execution time, ns </th>
<th> Network wait time, ns </th>
<th> Sync block time, ns </th>
<th> Blocking syscall time, ns </th>
<th> Scheduler wait time, ns </th>
<th> GC sweeping time, ns </th>
<th> GC pause time, ns </th>
</tr>
{{range $}}
<tr>
<td> <a href="/trace?goid={{.ID}}">{{.ID}}</a> </td>
<td> {{.TotalTime}} </td>
<td> {{.ExecTime}} </td>
<td> {{.IOTime}} </td>
<td> {{.BlockTime}} </td>
<td> {{.SyscallTime}} </td>
<td> {{.SchedWaitTime}} </td>
<td> {{.SweepTime}} </td>
<td> {{.GCTime}} </td>
</tr>
{{end}}
</table>
</body>
</html>
`))
// relatedGoroutines finds set of related goroutines that we need to include
// into trace for goroutine goid: goid itself plus everything reachable within
// two "unblock" hops (goroutines that unblock goid, and goroutines that
// unblock those).
func relatedGoroutines(events []*trace.Event, goid uint64) map[uint64]bool {
	gmap := map[uint64]bool{goid: true}
	for depth := 0; depth < 2; depth++ {
		next := make(map[uint64]bool, len(gmap))
		for g := range gmap {
			next[g] = true
		}
		for _, ev := range events {
			if ev.Type == trace.EvGoUnblock && gmap[ev.Args[0]] {
				next[ev.G] = true
			}
		}
		gmap = next
	}
	gmap[0] = true // for GC events
	return gmap
}

156
src/cmd/trace/main.go Normal file
View File

@ -0,0 +1,156 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Trace is a tool for viewing trace files.
Trace files can be generated with:
- runtime/pprof.StartTrace
- net/http/pprof package
- go test -trace
Example usage:
Generate a trace file with 'go test':
go test -trace trace.out pkg
View the trace in a web browser:
go tool trace pkg.test trace.out
*/
package main
import (
"bufio"
"flag"
"fmt"
"internal/trace"
"net"
"net/http"
"os"
"os/exec"
"runtime"
"sync"
)
// usageMessage is printed by flag.Usage when arguments are missing or wrong.
const usageMessage = "" +
	`Usage of 'go tool trace':
Given a trace file produced by 'go test':
go test -trace=trace.out pkg
Open a web browser displaying trace:
go tool trace [flags] pkg.test trace.out
Flags:
-http=addr: HTTP service address (e.g., ':6060')
`
var (
	httpFlag = flag.String("http", "localhost:0", "HTTP service address (e.g., ':6060')")

	// The binary file name, left here for serveSVGProfile.
	programBinary string
	// traceFile is the trace file name (second positional argument).
	traceFile string
)
// main parses the command line, opens a local HTTP server serving the
// trace UI, launches a browser pointed at it, and serves until failure.
func main() {
	flag.Usage = func() {
		fmt.Fprintln(os.Stderr, usageMessage)
		os.Exit(2)
	}
	flag.Parse()

	// Usage information when no arguments.
	// (flag.Usage exits, so execution stops here on bad arity.)
	if flag.NArg() != 2 {
		flag.Usage()
	}
	programBinary = flag.Arg(0)
	traceFile = flag.Arg(1)

	ln, err := net.Listen("tcp", *httpFlag)
	if err != nil {
		dief("failed to create server socket: %v\n", err)
	}
	// Open browser.
	if !startBrowser("http://" + ln.Addr().String()) {
		dief("failed to start browser\n")
	}

	// Parse and symbolize trace asynchronously while browser opens.
	go parseEvents()

	// Start http server.
	http.HandleFunc("/", httpMain)
	err = http.Serve(ln, nil)
	// http.Serve only returns on error, so this always terminates the program.
	dief("failed to start http server: %v\n", err)
}
// loader memoizes the one-time parse/symbolize of the trace file;
// parseEvents uses loader.once so the work happens exactly once.
var loader struct {
	once   sync.Once
	events []*trace.Event
	err    error
}
// parseEvents opens, parses and symbolizes the trace file exactly once
// (guarded by loader.once); all callers share the memoized result.
// It returns the parsed events, or the error from the single attempt.
func parseEvents() ([]*trace.Event, error) {
	loader.once.Do(func() {
		// Use the traceFile variable set in main rather than re-reading
		// flag.Arg(1), consistent with how programBinary is handled.
		tracef, err := os.Open(traceFile)
		if err != nil {
			loader.err = fmt.Errorf("failed to open trace file: %v", err)
			return
		}
		defer tracef.Close()

		// Parse and symbolize.
		events, err := trace.Parse(bufio.NewReader(tracef))
		if err != nil {
			loader.err = fmt.Errorf("failed to parse trace: %v", err)
			return
		}
		err = trace.Symbolize(events, programBinary)
		if err != nil {
			loader.err = fmt.Errorf("failed to symbolize trace: %v", err)
			return
		}
		loader.events = events
	})
	return loader.events, loader.err
}
// httpMain serves the starting page (a static list of links to the
// analysis views).
func httpMain(w http.ResponseWriter, r *http.Request) {
	w.Write(templMain)
}
// templMain is the static HTML for the starting page.
var templMain = []byte(`
<html>
<body>
<a href="/trace">View trace</a><br>
<a href="/goroutines">Goroutine analysis</a><br>
<a href="/io">IO blocking profile</a><br>
<a href="/block">Synchronization blocking profile</a><br>
<a href="/syscall">Syscall blocking profile</a><br>
<a href="/sched">Scheduler latency profile</a><br>
</body>
</html>
`)
// startBrowser tries to open the URL in a browser
// and reports whether it succeeds.
// Note: copied from x/tools/cmd/cover/html.go
func startBrowser(url string) bool {
	// Pick the platform-specific opener command.
	var prog string
	var args []string
	switch runtime.GOOS {
	case "darwin":
		prog = "open"
	case "windows":
		prog, args = "cmd", []string{"/c", "start"}
	default:
		prog = "xdg-open"
	}
	args = append(args, url)
	return exec.Command(prog, args...).Start() == nil
}
// dief prints a formatted message to stderr and terminates the
// process with exit status 1.
func dief(msg string, args ...interface{}) {
	os.Stderr.WriteString(fmt.Sprintf(msg, args...))
	os.Exit(1)
}

162
src/cmd/trace/pprof.go Normal file
View File

@ -0,0 +1,162 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Serving of pprof-like profiles.
package main
import (
"bufio"
"fmt"
"internal/trace"
"io/ioutil"
"net/http"
"os"
"os/exec"
)
// init registers the HTTP handlers for the pprof-like profile pages.
func init() {
	http.HandleFunc("/io", httpIO)
	http.HandleFunc("/block", httpBlock)
	http.HandleFunc("/syscall", httpSyscall)
	http.HandleFunc("/sched", httpSched)
}
// Record represents one entry in pprof-like profiles.
type Record struct {
	stk  []*trace.Frame // stack that accumulated the time
	n    uint64         // number of occurrences
	time int64          // total time in nanoseconds
}
// httpIO serves IO pprof-like profile (time spent in IO wait).
// Time is measured from a network-block event to its linked unblock event,
// keyed by the blocking stack.
func httpIO(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	prof := make(map[uint64]Record)
	for _, ev := range events {
		if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
			continue
		}
		rec := prof[ev.StkID]
		rec.stk = ev.Stk
		rec.n++
		rec.time += ev.Link.Ts - ev.Ts
		prof[ev.StkID] = rec
	}
	serveSVGProfile(w, r, prof)
}
// httpBlock serves blocking pprof-like profile (time spent blocked on synchronization primitives).
func httpBlock(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	prof := make(map[uint64]Record)
	for _, ev := range events {
		// Only channel/select/mutex/cond block events count; skip the rest.
		switch ev.Type {
		case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect,
			trace.EvGoBlockSync, trace.EvGoBlockCond:
		default:
			continue
		}
		if ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
			continue
		}
		rec := prof[ev.StkID]
		rec.stk = ev.Stk
		rec.n++
		rec.time += ev.Link.Ts - ev.Ts
		prof[ev.StkID] = rec
	}
	serveSVGProfile(w, r, prof)
}
// httpSyscall serves syscall pprof-like profile (time spent blocked in syscalls).
func httpSyscall(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	prof := make(map[uint64]Record)
	for _, ev := range events {
		if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
			continue
		}
		rec := prof[ev.StkID]
		rec.stk = ev.Stk
		rec.n++
		rec.time += ev.Link.Ts - ev.Ts
		prof[ev.StkID] = rec
	}
	serveSVGProfile(w, r, prof)
}
// httpSched serves scheduler latency pprof-like profile
// (time between a goroutine become runnable and actually scheduled for execution).
func httpSched(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	prof := make(map[uint64]Record)
	for _, ev := range events {
		// Unblock/create events link to the subsequent GoStart; the gap
		// between them is the scheduling latency.
		if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) ||
			ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 {
			continue
		}
		rec := prof[ev.StkID]
		rec.stk = ev.Stk
		rec.n++
		rec.time += ev.Link.Ts - ev.Ts
		prof[ev.StkID] = rec
	}
	serveSVGProfile(w, r, prof)
}
// serveSVGProfile writes prof to a temp file in pprof contention format,
// converts it to SVG via 'go tool pprof', and serves the SVG to w.
func serveSVGProfile(w http.ResponseWriter, r *http.Request, prof map[uint64]Record) {
	blockf, err := ioutil.TempFile("", "block")
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to create temp file: %v", err), http.StatusInternalServerError)
		return
	}
	defer os.Remove(blockf.Name())
	blockb := bufio.NewWriter(blockf)
	// Emit pprof contention-profile header and one line per record.
	fmt.Fprintf(blockb, "--- contention:\ncycles/second=1000000000\n")
	for _, rec := range prof {
		fmt.Fprintf(blockb, "%v %v @", rec.time, rec.n)
		for _, f := range rec.stk {
			fmt.Fprintf(blockb, " 0x%x", f.PC)
		}
		fmt.Fprintf(blockb, "\n")
	}
	err = blockb.Flush()
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to flush temp file: %v", err), http.StatusInternalServerError)
		return
	}
	err = blockf.Close()
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to close temp file: %v", err), http.StatusInternalServerError)
		return
	}
	svgFilename := blockf.Name() + ".svg"
	_, err = exec.Command("go", "tool", "pprof", "-svg", "-output", svgFilename, programBinary, blockf.Name()).CombinedOutput()
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to execute go tool pprof: %v", err), http.StatusInternalServerError)
		return
	}
	defer os.Remove(svgFilename)
	w.Header().Set("Content-Type", "image/svg+xml")
	http.ServeFile(w, r, svgFilename)
}

434
src/cmd/trace/trace.go Normal file
View File

@ -0,0 +1,434 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"fmt"
"internal/trace"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
)
// init registers the HTTP handlers for the trace-viewer pages.
func init() {
	http.HandleFunc("/trace", httpTrace)
	http.HandleFunc("/jsontrace", httpJsonTrace)
	http.HandleFunc("/trace_viewer_html", httpTraceViewerHTML)
}
// httpTrace serves either whole trace (goid==0) or trace for goid goroutine.
// It only emits the HTML shell; the actual trace data is fetched by the
// embedded viewer from /jsontrace.
func httpTrace(w http.ResponseWriter, r *http.Request) {
	_, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	params := ""
	if goids := r.FormValue("goid"); goids != "" {
		goid, err := strconv.ParseUint(goids, 10, 64)
		if err != nil {
			http.Error(w, fmt.Sprintf("failed to parse goid parameter '%v': %v", goids, err), http.StatusInternalServerError)
			return
		}
		params = fmt.Sprintf("?goid=%v", goid)
	}
	html := strings.Replace(templTrace, "{{PARAMS}}", params, -1)
	w.Write([]byte(html))
}
// templTrace is the HTML shell for the trace view; {{PARAMS}} is substituted
// by httpTrace with the per-goroutine query string (or empty).
var templTrace = `
<html>
<head>
<link href="/trace_viewer_html" rel="import">
<script>
document.addEventListener("DOMContentLoaded", function(event) {
var viewer = new tv.TraceViewer('/jsontrace{{PARAMS}}');
document.body.appendChild(viewer);
});
</script>
</head>
<body>
</body>
</html>
`
// httpTraceViewerHTML serves static part of trace-viewer.
// This URL is queried from templTrace HTML.
func httpTraceViewerHTML(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, filepath.Join(os.Getenv("GOROOT"), "misc", "trace", "trace_viewer_lean.html"))
}
// httpJsonTrace serves json trace, requested from within templTrace HTML.
// With a "goid" query parameter it restricts the trace to that goroutine's
// lifetime and its related goroutines; otherwise it serves the whole trace.
func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
	events, err := parseEvents()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	params := &traceParams{
		events:  events,
		endTime: int64(1<<63 - 1),
	}
	if goids := r.FormValue("goid"); goids != "" {
		goid, err := strconv.ParseUint(goids, 10, 64)
		if err != nil {
			http.Error(w, fmt.Sprintf("failed to parse goid parameter '%v': %v", goids, err), http.StatusInternalServerError)
			return
		}
		analyzeGoroutines(events)
		g := gs[goid]
		if g == nil {
			// Unknown goroutine id: report cleanly instead of panicking below.
			http.Error(w, fmt.Sprintf("no goroutine with id %v", goid), http.StatusNotFound)
			return
		}
		params.gtrace = true
		params.startTime = g.StartTime
		params.endTime = g.EndTime
		params.maing = goid
		params.gs = relatedGoroutines(events, goid)
	}
	err = json.NewEncoder(w).Encode(generateTrace(params))
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to serialize trace: %v", err), http.StatusInternalServerError)
		return
	}
}
// traceParams selects which part of the trace generateTrace renders.
type traceParams struct {
	events    []*trace.Event
	gtrace    bool            // per-goroutine mode
	startTime int64           // inclusive time window start
	endTime   int64           // inclusive time window end
	maing     uint64          // the main goroutine in per-goroutine mode
	gs        map[uint64]bool // set of goroutines to include (nil = all)
}
// traceContext carries mutable state while generateTrace walks the events:
// the output being built, the stack-frame dedup tree, and running counters
// for the STATS rows.
type traceContext struct {
	*traceParams
	data      ViewerData
	frameTree frameNode // root of the stack-frame prefix tree
	frameSeq  int       // last assigned frame id
	arrowSeq  uint64    // last assigned flow-arrow id
	heapAlloc uint64    // last seen heap allocation size
	nextGC    uint64    // last seen next-GC target
	gcount    uint64    // live goroutines
	grunnable uint64    // runnable goroutines
	grunning  uint64    // running goroutines
	insyscall uint64    // goroutines blocked in syscalls
	prunning  uint64    // running Ps
}
// frameNode is a node in the stack-frame prefix tree used to assign
// stable integer ids to (deduplicated) stack frames.
type frameNode struct {
	id       int
	children map[uint64]frameNode // keyed by child frame PC
}
// ViewerData is the top-level JSON object consumed by trace-viewer.
type ViewerData struct {
	Events []*ViewerEvent         `json:"traceEvents"`
	Frames map[string]ViewerFrame `json:"stackFrames"`
}
// ViewerEvent is a single event in the trace-viewer JSON format.
type ViewerEvent struct {
	Name     string      `json:"name,omitempty"`
	Phase    string      `json:"ph"`
	Scope    string      `json:"s,omitempty"`
	Time     int64       `json:"ts"`
	Dur      int64       `json:"dur,omitempty"`
	Pid      uint64      `json:"pid"`
	Tid      uint64      `json:"tid"`
	ID       uint64      `json:"id,omitempty"`
	Stack    int         `json:"sf,omitempty"`
	EndStack int         `json:"esf,omitempty"`
	Arg      interface{} `json:"args,omitempty"`
}
// ViewerFrame is one deduplicated stack frame in the trace-viewer JSON.
type ViewerFrame struct {
	Name   string `json:"name"`
	Parent int    `json:"parent,omitempty"`
}
// NameArg is the args payload for trace-viewer metadata name events.
type NameArg struct {
	Name string `json:"name"`
}

// SortIndexArg is the args payload for trace-viewer sort-index metadata events.
type SortIndexArg struct {
	Index int `json:"sort_index"`
}
// generateTrace generates json trace for trace-viewer:
// https://github.com/google/trace-viewer
// Trace format is described at:
// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/view
// If gtrace=true, generate trace for goroutine goid, otherwise whole trace.
// startTime, endTime determine part of the trace that we are interested in.
// gset restricts goroutines that are included in the resulting trace.
func generateTrace(params *traceParams) ViewerData {
	ctx := &traceContext{traceParams: params}
	ctx.frameTree.children = make(map[uint64]frameNode)
	ctx.data.Frames = make(map[string]ViewerFrame)
	maxProc := 0
	gnames := make(map[uint64]string) // goroutine ID -> display name "G<id> <start fn>"
	for _, ev := range ctx.events {
		// Handle trace.EvGoStart separately, because we need the goroutine name
		// even if ignore the event otherwise.
		if ev.Type == trace.EvGoStart {
			if _, ok := gnames[ev.G]; !ok {
				if len(ev.Stk) > 0 {
					gnames[ev.G] = fmt.Sprintf("G%v %s", ev.G, ev.Stk[0].Fn)
				} else {
					gnames[ev.G] = fmt.Sprintf("G%v", ev.G)
				}
			}
		}

		// Ignore events that are from uninteresting goroutines
		// or outside of the interesting timeframe.
		if ctx.gs != nil && ev.P < trace.FakeP && !ctx.gs[ev.G] {
			continue
		}
		if ev.Ts < ctx.startTime || ev.Ts > ctx.endTime {
			continue
		}

		if ev.P < trace.FakeP && ev.P > maxProc {
			maxProc = ev.P
		}

		switch ev.Type {
		case trace.EvProcStart:
			if ctx.gtrace {
				continue
			}
			ctx.prunning++
			ctx.emitThreadCounters(ev)
			ctx.emitInstant(ev, "proc start")
		case trace.EvProcStop:
			if ctx.gtrace {
				continue
			}
			ctx.prunning--
			ctx.emitThreadCounters(ev)
			ctx.emitInstant(ev, "proc stop")
		case trace.EvGCStart:
			ctx.emitSlice(ev, "GC")
		case trace.EvGCDone:
		case trace.EvGCScanStart:
			if ctx.gtrace {
				continue
			}
			ctx.emitSlice(ev, "MARK")
		case trace.EvGCScanDone:
		case trace.EvGCSweepStart:
			ctx.emitSlice(ev, "SWEEP")
		case trace.EvGCSweepDone:
		case trace.EvGoStart:
			ctx.grunnable--
			ctx.grunning++
			ctx.emitGoroutineCounters(ev)
			ctx.emitSlice(ev, gnames[ev.G])
		case trace.EvGoCreate:
			ctx.gcount++
			ctx.grunnable++
			ctx.emitGoroutineCounters(ev)
			ctx.emitArrow(ev, "go")
		case trace.EvGoEnd:
			ctx.gcount--
			ctx.grunning--
			ctx.emitGoroutineCounters(ev)
		case trace.EvGoUnblock:
			ctx.grunnable++
			ctx.emitGoroutineCounters(ev)
			ctx.emitArrow(ev, "unblock")
		case trace.EvGoSysCall:
			ctx.emitInstant(ev, "syscall")
		case trace.EvGoSysExit:
			ctx.grunnable++
			ctx.emitGoroutineCounters(ev)
			ctx.insyscall--
			ctx.emitThreadCounters(ev)
			ctx.emitArrow(ev, "sysexit")
		case trace.EvGoSysBlock:
			ctx.grunning--
			ctx.emitGoroutineCounters(ev)
			ctx.insyscall++
			ctx.emitThreadCounters(ev)
		case trace.EvGoSched, trace.EvGoPreempt:
			ctx.grunnable++
			ctx.grunning--
			ctx.emitGoroutineCounters(ev)
		case trace.EvGoStop,
			trace.EvGoSleep, trace.EvGoBlock, trace.EvGoBlockSend, trace.EvGoBlockRecv,
			trace.EvGoBlockSelect, trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockNet:
			ctx.grunning--
			ctx.emitGoroutineCounters(ev)
		case trace.EvGoWaiting:
			ctx.grunnable--
			ctx.emitGoroutineCounters(ev)
		case trace.EvGoInSyscall:
			ctx.insyscall++
			ctx.emitThreadCounters(ev)
		case trace.EvHeapAlloc:
			ctx.heapAlloc = ev.Args[0]
			ctx.emitHeapCounters(ev)
		case trace.EvNextGC:
			ctx.nextGC = ev.Args[0]
			ctx.emitHeapCounters(ev)
		}
	}

	// Metadata events naming the two "processes" (PROCS and STATS) and
	// the fake Ps, and fixing their display sort order.
	ctx.emit(&ViewerEvent{Name: "process_name", Phase: "M", Pid: 0, Arg: &NameArg{"PROCS"}})
	ctx.emit(&ViewerEvent{Name: "process_sort_index", Phase: "M", Pid: 0, Arg: &SortIndexArg{1}})

	ctx.emit(&ViewerEvent{Name: "process_name", Phase: "M", Pid: 1, Arg: &NameArg{"STATS"}})
	ctx.emit(&ViewerEvent{Name: "process_sort_index", Phase: "M", Pid: 1, Arg: &SortIndexArg{0}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.NetpollP, Arg: &NameArg{"Network"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.NetpollP, Arg: &SortIndexArg{-5}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.TimerP, Arg: &NameArg{"Timers"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.TimerP, Arg: &SortIndexArg{-4}})

	ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: trace.SyscallP, Arg: &NameArg{"Syscalls"}})
	ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: trace.SyscallP, Arg: &SortIndexArg{-3}})

	if !ctx.gtrace {
		for i := 0; i <= maxProc; i++ {
			ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: uint64(i), Arg: &NameArg{fmt.Sprintf("Proc %v", i)}})
		}
	}

	if ctx.gtrace && ctx.gs != nil {
		for k, v := range gnames {
			if !ctx.gs[k] {
				continue
			}
			ctx.emit(&ViewerEvent{Name: "thread_name", Phase: "M", Pid: 0, Tid: k, Arg: &NameArg{v}})
		}
		// Pin the main goroutine and goroutine 0 (GC) to the top.
		ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: ctx.maing, Arg: &SortIndexArg{-2}})
		ctx.emit(&ViewerEvent{Name: "thread_sort_index", Phase: "M", Pid: 0, Tid: 0, Arg: &SortIndexArg{-1}})
	}

	return ctx.data
}
// emit appends one event to the output being built.
func (ctx *traceContext) emit(e *ViewerEvent) {
	ctx.data.Events = append(ctx.data.Events, e)
}
// time converts ev's timestamp to a viewer timestamp relative to the
// start of the selected time window. It panics if ev falls outside the
// window, which indicates a filtering bug earlier in generateTrace.
func (ctx *traceContext) time(ev *trace.Event) int64 {
	if ev.Ts < ctx.startTime || ev.Ts > ctx.endTime {
		// Was missing the verb for endTime, which made the diagnostic
		// print "%!(EXTRA ...)" instead of the actual bound.
		fmt.Printf("ts=%v startTime=%v endTime=%v\n", ev.Ts, ctx.startTime, ctx.endTime)
		panic("timestamp is outside of trace range")
	}
	// NOTE: trace viewer wants timestamps in microseconds and it does not
	// handle fractional timestamps (rounds them). We give it timestamps
	// in nanoseconds to avoid rounding. See:
	// https://github.com/google/trace-viewer/issues/624
	return ev.Ts - ctx.startTime
}
// proc returns the trace-viewer "thread" identifier for ev:
// in per-goroutine mode, events on real Ps are attributed to the
// goroutine itself; otherwise the event's P number is used.
func (ctx *traceContext) proc(ev *trace.Event) uint64 {
	if ctx.gtrace && ev.P < trace.FakeP {
		return ev.G
	}
	return uint64(ev.P)
}
// emitSlice emits a duration ("X" phase) event spanning from ev to its
// linked completion event ev.Link.
func (ctx *traceContext) emitSlice(ev *trace.Event, name string) {
	ctx.emit(&ViewerEvent{
		Name:  name,
		Phase: "X",
		Time:  ctx.time(ev),
		Dur:   ctx.time(ev.Link) - ctx.time(ev),
		Tid:   ctx.proc(ev),
		//Stack: ctx.stack(ev.Stk),
		EndStack: ctx.stack(ev.Link.Stk),
	})
}
// emitHeapCounters emits a counter ("C" phase) event with the current
// heap allocation and remaining headroom to the next GC. No-op in
// per-goroutine mode, which has no STATS rows.
func (ctx *traceContext) emitHeapCounters(ev *trace.Event) {
	type Arg struct {
		Allocated uint64
		NextGC    uint64
	}
	if ctx.gtrace {
		return
	}
	// Clamp to zero so the counter never goes negative.
	diff := uint64(0)
	if ctx.nextGC > ctx.heapAlloc {
		diff = ctx.nextGC - ctx.heapAlloc
	}
	ctx.emit(&ViewerEvent{Name: "Heap", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &Arg{ctx.heapAlloc, diff}})
}
// emitGoroutineCounters emits a counter event with the current numbers of
// running and runnable goroutines. No-op in per-goroutine mode.
func (ctx *traceContext) emitGoroutineCounters(ev *trace.Event) {
	type Arg struct {
		Running  uint64
		Runnable uint64
	}
	if ctx.gtrace {
		return
	}
	ctx.emit(&ViewerEvent{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &Arg{ctx.grunning, ctx.grunnable}})
}
// emitThreadCounters emits a counter event with the current numbers of
// running Ps and goroutines blocked in syscalls. No-op in per-goroutine mode.
func (ctx *traceContext) emitThreadCounters(ev *trace.Event) {
	type Arg struct {
		Running   uint64
		InSyscall uint64
	}
	if ctx.gtrace {
		return
	}
	ctx.emit(&ViewerEvent{Name: "Threads", Phase: "C", Time: ctx.time(ev), Pid: 1, Arg: &Arg{ctx.prunning, ctx.insyscall}})
}
// emitInstant emits a thread-scoped instant ("I" phase) event for ev.
func (ctx *traceContext) emitInstant(ev *trace.Event, name string) {
	ctx.emit(&ViewerEvent{Name: name, Phase: "I", Scope: "t", Time: ctx.time(ev), Tid: ctx.proc(ev), Stack: ctx.stack(ev.Stk)})
}
// emitArrow emits a flow arrow (paired "s"/"t" phase events) from ev to its
// linked event, e.g. from an unblock to the corresponding start.
func (ctx *traceContext) emitArrow(ev *trace.Event, name string) {
	if ev.Link == nil {
		// The other end of the arrow is not captured in the trace.
		// For example, a goroutine was unblocked but was not scheduled before trace stop.
		return
	}
	if ctx.gtrace && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) {
		// The destination is filtered out in per-goroutine mode.
		return
	}
	ctx.arrowSeq++
	ctx.emit(&ViewerEvent{Name: name, Phase: "s", Tid: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.stack(ev.Stk)})
	ctx.emit(&ViewerEvent{Name: name, Phase: "t", Tid: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link)})
}
// stack returns the viewer frame id for stk, interning it into the
// frame prefix tree.
func (ctx *traceContext) stack(stk []*trace.Frame) int {
	return ctx.buildBranch(ctx.frameTree, stk)
}
// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree.
// The stack is consumed from the outermost frame (last element) inward;
// it returns the id of the leaf node, registering any new frames in
// ctx.data.Frames along the way.
func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
	if len(stk) == 0 {
		return parent.id
	}
	last := len(stk) - 1
	frame := stk[last]
	stk = stk[:last]
	node, ok := parent.children[frame.PC]
	if !ok {
		ctx.frameSeq++
		node.id = ctx.frameSeq
		node.children = make(map[uint64]frameNode)
		parent.children[frame.PC] = node
		ctx.data.Frames[strconv.Itoa(node.id)] = ViewerFrame{fmt.Sprintf("%v:%v", frame.Fn, frame.Line), parent.id}
	}
	return ctx.buildBranch(node, stk)
}

View File

@ -0,0 +1,662 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"os/exec"
"sort"
"strconv"
"strings"
)
// Event describes one event in the trace.
type Event struct {
	Off   int       // offset in input file (for debugging and error reporting)
	Type  byte      // one of Ev*
	Ts    int64     // timestamp in nanoseconds
	P     int       // P on which the event happened (can be one of TimerP, NetpollP, SyscallP)
	G     uint64    // G on which the event happened
	StkID uint64    // unique stack ID
	Stk   []*Frame  // stack trace (can be empty)
	Args  [2]uint64 // event-type-specific arguments
	// linked event (can be nil), depends on event type:
	// for GCStart: the GCStop
	// for GCScanStart: the GCScanDone
	// for GCSweepStart: the GCSweepDone
	// for GoCreate: first GoStart of the created goroutine
	// for GoStart: the associated GoEnd, GoBlock or other blocking event
	// for GoSched/GoPreempt: the next GoStart
	// for GoBlock and other blocking events: the unblock event
	// for GoUnblock: the associated GoStart
	// for blocking GoSysCall: the associated GoSysExit
	// for GoSysExit: the next GoStart
	Link *Event
}
// Frame is a frame in stack traces.
type Frame struct {
	PC   uint64
	Fn   string // function name (filled in by symbolization)
	File string
	Line int
}
const (
	// Special P identifiers:
	FakeP    = 1000000 + iota
	TimerP   // depicts timer unblocks
	NetpollP // depicts network unblocks
	SyscallP // depicts returns from syscalls
)
// Parse parses, post-processes and verifies the trace read from r,
// returning the resulting event slice.
func Parse(r io.Reader) ([]*Event, error) {
	rawEvents, err := readTrace(r)
	if err != nil {
		return nil, err
	}
	events, err := parseEvents(rawEvents)
	if err != nil {
		return nil, err
	}
	err = postProcessTrace(events)
	if err != nil {
		return nil, err
	}
	return events, nil
}
// rawEvent is a helper type used during parsing.
type rawEvent struct {
	off  int    // file offset of the event (for error reporting)
	typ  byte   // event type
	args []uint64
}
// readTrace does wire-format parsing and verification.
// It does not care about specific event types and argument meaning.
func readTrace(r io.Reader) ([]rawEvent, error) {
	// Read and validate trace header. io.ReadFull guards against legal
	// short reads from r (a plain r.Read may return fewer than 8 bytes
	// without an error).
	var buf [8]byte
	off, err := io.ReadFull(r, buf[:])
	if off != 8 || err != nil {
		return nil, fmt.Errorf("failed to read header: read %v, err %v", off, err)
	}
	if !bytes.Equal(buf[:], []byte("gotrace\x00")) {
		return nil, fmt.Errorf("not a trace file")
	}

	// Read events.
	var events []rawEvent
	for {
		// Read event type and number of arguments (1 byte).
		off0 := off
		n, err := io.ReadFull(r, buf[:1])
		if err == io.EOF {
			break
		}
		if err != nil || n != 1 {
			return nil, fmt.Errorf("failed to read trace at offset 0x%x: n=%v err=%v", off0, n, err)
		}
		off += n
		typ := buf[0] << 2 >> 2 // low 6 bits: event type
		narg := buf[0]>>6 + 1   // high 2 bits: argument count - 1
		ev := rawEvent{typ: typ, off: off0}
		if narg <= 3 {
			for i := 0; i < int(narg); i++ {
				var v uint64
				v, off, err = readVal(r, off)
				if err != nil {
					return nil, err
				}
				ev.args = append(ev.args, v)
			}
		} else {
			// If narg == 4, the first value is length of the event in bytes.
			var v uint64
			v, off, err = readVal(r, off)
			if err != nil {
				return nil, err
			}
			evLen := v
			off1 := off
			for evLen > uint64(off-off1) {
				v, off, err = readVal(r, off)
				if err != nil {
					return nil, err
				}
				ev.args = append(ev.args, v)
			}
			if evLen != uint64(off-off1) {
				return nil, fmt.Errorf("event has wrong length at offset 0x%x: want %v, got %v", off0, evLen, off-off1)
			}
		}
		events = append(events, ev)
	}
	return events, nil
}
// parseEvents transforms raw events into Events.
// It analyzes and verifies per-event-type arguments,
// attaches stack traces, sorts events by time, and converts
// CPU ticks to nanoseconds relative to the first event.
func parseEvents(rawEvents []rawEvent) (events []*Event, err error) {
	var ticksPerSec, lastTs int64
	var lastG, timerGoid uint64
	var lastP int
	lastGs := make(map[int]uint64) // last goroutine running on P
	stacks := make(map[uint64][]*Frame)
	for _, raw := range rawEvents {
		if raw.typ == EvNone || raw.typ >= EvCount {
			err = fmt.Errorf("unknown event type %v at offset 0x%x", raw.typ, raw.off)
			return
		}
		desc := EventDescriptions[raw.typ]
		if desc.Name == "" {
			err = fmt.Errorf("missing description for event type %v", raw.typ)
			return
		}
		// Validate argument count for fixed-arity events.
		// EvStack is variable-length and validated in its case below.
		if raw.typ != EvStack {
			narg := len(desc.Args)
			if desc.Stack {
				narg++
			}
			if raw.typ != EvBatch && raw.typ != EvFrequency && raw.typ != EvTimerGoroutine {
				narg++ // timestamp
			}
			if len(raw.args) != narg {
				err = fmt.Errorf("%v has wrong number of arguments at offset 0x%x: want %v, got %v",
					desc.Name, raw.off, narg, len(raw.args))
				return
			}
		}
		switch raw.typ {
		case EvBatch:
			// Switch to another P's batch: remember the current P's last
			// goroutine and restore the new P's.
			lastGs[lastP] = lastG
			lastP = int(raw.args[0])
			lastG = lastGs[lastP]
			lastTs = int64(raw.args[1])
		case EvFrequency:
			ticksPerSec = int64(raw.args[0])
			if ticksPerSec <= 0 {
				err = fmt.Errorf("EvFrequency contains invalid frequency %v at offset 0x%x",
					ticksPerSec, raw.off)
				return
			}
		case EvTimerGoroutine:
			timerGoid = raw.args[0]
		case EvStack:
			if len(raw.args) < 2 {
				err = fmt.Errorf("EvStack has wrong number of arguments at offset 0x%x: want at least 2, got %v",
					raw.off, len(raw.args))
				return
			}
			size := raw.args[1]
			if size > 1000 {
				err = fmt.Errorf("EvStack has bad number of frames at offset 0x%x: %v",
					raw.off, size)
				return
			}
			id := raw.args[0]
			if id != 0 && size > 0 {
				stk := make([]*Frame, size)
				for i := 0; i < int(size); i++ {
					stk[i] = &Frame{PC: raw.args[i+2]}
				}
				stacks[id] = stk
			}
		default:
			e := &Event{Off: raw.off, Type: raw.typ, P: lastP, G: lastG}
			// Timestamps are delta-encoded within a batch.
			e.Ts = lastTs + int64(raw.args[0])
			lastTs = e.Ts
			for i := range desc.Args {
				e.Args[i] = raw.args[i+1]
			}
			if desc.Stack {
				e.StkID = raw.args[len(desc.Args)+1]
			}
			switch raw.typ {
			case EvGoStart:
				lastG = e.Args[0]
				e.G = lastG
			case EvGCStart, EvGCDone, EvGCScanStart, EvGCScanDone:
				// GC events are attributed to the system, not a goroutine.
				e.G = 0
			case EvGoEnd, EvGoStop, EvGoSched, EvGoPreempt,
				EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
				EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet,
				EvGoSysBlock:
				// The goroutine stopped running on this P.
				lastG = 0
			}
			events = append(events, e)
		}
	}

	// Attach stack traces.
	for _, ev := range events {
		if ev.StkID != 0 {
			ev.Stk = stacks[ev.StkID]
		}
	}

	// Sort by time and translate cpu ticks to real time.
	sort.Sort(eventList(events))
	if ticksPerSec == 0 {
		err = fmt.Errorf("no EvFrequency event")
		return
	}
	// Guard against a trace containing only header events
	// (EvBatch/EvFrequency/EvStack/EvTimerGoroutine):
	// events[0] below would otherwise panic.
	if len(events) == 0 {
		err = fmt.Errorf("trace contains no events")
		return
	}
	minTs := events[0].Ts
	for _, ev := range events {
		ev.Ts = (ev.Ts - minTs) * 1e9 / ticksPerSec
		// Move timers and syscalls to separate fake Ps.
		if timerGoid != 0 && ev.G == timerGoid && ev.Type == EvGoUnblock {
			ev.P = TimerP
		}
		if ev.Type == EvGoSysExit {
			ev.P = SyscallP
			ev.G = ev.Args[0]
		}
	}
	return
}
// postProcessTrace does inter-event verification and information restoration.
// The resulting trace is guaranteed to be consistent
// (for example, a P does not run two Gs at the same time, or a G is indeed
// blocked before an unblock event).
// It also links paired events (e.g. GCStart->GCDone, GoCreate->GoStart)
// via the Link field as a side effect.
func postProcessTrace(events []*Event) error {
	// Goroutine states tracked by this verifier.
	const (
		gDead = iota
		gRunnable
		gRunning
		gWaiting
	)
	// gdesc is the tracked state of a single goroutine.
	type gdesc struct {
		state   int
		ev      *Event // pending event awaiting its pair (create/block/syscall/unblock)
		evStart *Event // EvGoStart that began the current running interval
	}
	// pdesc is the tracked state of a single P.
	type pdesc struct {
		running bool
		g       uint64 // goroutine currently running on this P (0 if none)
		evGC    *Event // open EvGCStart, if any
		evScan  *Event // open EvGCScanStart, if any
		evSweep *Event // open EvGCSweepStart, if any
	}
	gs := make(map[uint64]gdesc)
	ps := make(map[int]pdesc)
	// g0 is considered always running.
	gs[0] = gdesc{state: gRunning}
	// checkRunning verifies that goroutine ev.G is running on P ev.P.
	checkRunning := func(p pdesc, g gdesc, ev *Event) error {
		name := EventDescriptions[ev.Type].Name
		if g.state != gRunning {
			return fmt.Errorf("g %v is not running while %v (offset %v, time %v)", ev.G, name, ev.Off, ev.Ts)
		}
		if p.g != ev.G {
			return fmt.Errorf("p %v is not running g %v while %v (offset %v, time %v)", ev.P, ev.G, name, ev.Off, ev.Ts)
		}
		return nil
	}
	for _, ev := range events {
		// Note: g and p are copies; updated state is written back
		// to the maps at the bottom of the loop.
		g := gs[ev.G]
		p := ps[ev.P]
		switch ev.Type {
		case EvProcStart:
			if p.running {
				return fmt.Errorf("p %v is running before start (offset %v, time %v)", ev.P, ev.Off, ev.Ts)
			}
			p.running = true
		case EvProcStop:
			if !p.running {
				return fmt.Errorf("p %v is not running before stop (offset %v, time %v)", ev.P, ev.Off, ev.Ts)
			}
			if p.g != 0 {
				return fmt.Errorf("p %v is running a goroutine %v during stop (offset %v, time %v)", ev.P, p.g, ev.Off, ev.Ts)
			}
			p.running = false
		case EvGCStart:
			if p.evGC != nil {
				return fmt.Errorf("previous GC is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
			}
			p.evGC = ev
		case EvGCDone:
			if p.evGC == nil {
				return fmt.Errorf("bogus GC end (offset %v, time %v)", ev.Off, ev.Ts)
			}
			p.evGC.Link = ev
			p.evGC = nil
		case EvGCScanStart:
			if p.evScan != nil {
				return fmt.Errorf("previous scanning is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
			}
			p.evScan = ev
		case EvGCScanDone:
			if p.evScan == nil {
				return fmt.Errorf("bogus scanning end (offset %v, time %v)", ev.Off, ev.Ts)
			}
			p.evScan.Link = ev
			p.evScan = nil
		case EvGCSweepStart:
			if p.evSweep != nil {
				return fmt.Errorf("previous sweeping is not ended before a new one (offset %v, time %v)", ev.Off, ev.Ts)
			}
			p.evSweep = ev
		case EvGCSweepDone:
			if p.evSweep == nil {
				return fmt.Errorf("bogus sweeping end (offset %v, time %v)", ev.Off, ev.Ts)
			}
			p.evSweep.Link = ev
			p.evSweep = nil
		case EvGoWaiting:
			// Initial-state event: the goroutine was already blocked
			// when tracing started.
			g1 := gs[ev.Args[0]]
			if g1.state != gRunnable {
				return fmt.Errorf("g %v is not runnable before EvGoWaiting (offset %v, time %v)", ev.Args[0], ev.Off, ev.Ts)
			}
			g1.state = gWaiting
			gs[ev.Args[0]] = g1
		case EvGoInSyscall:
			// this case is intentionally left blank
		case EvGoCreate:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			if _, ok := gs[ev.Args[0]]; ok {
				return fmt.Errorf("g %v already exists (offset %v, time %v)", ev.Args[0], ev.Off, ev.Ts)
			}
			gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev}
		case EvGoStart:
			if g.state != gRunnable {
				return fmt.Errorf("g %v is not runnable before start (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
			}
			if p.g != 0 {
				return fmt.Errorf("p %v is already running g %v while start g %v (offset %v, time %v)", ev.P, p.g, ev.G, ev.Off, ev.Ts)
			}
			g.state = gRunning
			g.evStart = ev
			p.g = ev.G
			if g.ev != nil {
				if g.ev.Type == EvGoCreate {
					// +1 because symblizer expects return pc.
					ev.Stk = []*Frame{&Frame{PC: g.ev.Args[1] + 1}}
				}
				g.ev.Link = ev
				g.ev = nil
			}
		case EvGoEnd, EvGoStop:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.evStart.Link = ev
			g.evStart = nil
			g.state = gDead
			p.g = 0
		case EvGoSched, EvGoPreempt:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.state = gRunnable
			g.evStart.Link = ev
			g.evStart = nil
			p.g = 0
			g.ev = ev
		case EvGoUnblock:
			if g.state != gRunning {
				return fmt.Errorf("g %v is not running while unpark (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
			}
			// Unblocks attributed to the timer goroutine live on the
			// fake TimerP, so skip the P check for them.
			if ev.P != TimerP && p.g != ev.G {
				return fmt.Errorf("p %v is not running g %v while unpark (offset %v, time %v)", ev.P, ev.G, ev.Off, ev.Ts)
			}
			g1 := gs[ev.Args[0]]
			if g1.state != gWaiting {
				return fmt.Errorf("g %v is not waiting before unpark (offset %v, time %v)", ev.Args[0], ev.Off, ev.Ts)
			}
			// Network-blocked goroutines are unblocked by the netpoller;
			// attribute the unblock to the fake NetpollP.
			if g1.ev != nil && g1.ev.Type == EvGoBlockNet && ev.P != TimerP {
				ev.P = NetpollP
			}
			if g1.ev != nil {
				g1.ev.Link = ev
			}
			g1.state = gRunnable
			g1.ev = ev
			gs[ev.Args[0]] = g1
		case EvGoSysCall:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.ev = ev
		case EvGoSysBlock:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			// The goroutine stays runnable (not waiting): it will resume
			// via EvGoSysExit/EvGoStart rather than EvGoUnblock.
			g.state = gRunnable
			g.evStart.Link = ev
			g.evStart = nil
			p.g = 0
		case EvGoSysExit:
			if g.state != gRunnable {
				return fmt.Errorf("g %v is not runnable during syscall exit (offset %v, time %v)", ev.G, ev.Off, ev.Ts)
			}
			if g.ev != nil && g.ev.Type == EvGoSysCall {
				g.ev.Link = ev
			}
			g.ev = ev
		case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
			EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.state = gWaiting
			g.ev = ev
			g.evStart.Link = ev
			g.evStart = nil
			p.g = 0
		}

		gs[ev.G] = g
		ps[ev.P] = p
	}

	// TODO(dvyukov): restore stacks for EvGoStart events.
	// TODO(dvyukov): test that all EvGoStart events has non-nil Link.

	return nil
}
// Symbolize attaches func/file/line info to stack traces by feeding every
// distinct PC through "go tool addr2line" run against the binary bin.
// Frames that addr2line cannot resolve are left with whatever it prints.
func Symbolize(events []*Event, bin string) error {
	// First, collect and dedup all pcs.
	pcs := make(map[uint64]*Frame)
	for _, ev := range events {
		for _, f := range ev.Stk {
			pcs[f.PC] = nil
		}
	}

	// Start addr2line.
	cmd := exec.Command("go", "tool", "addr2line", bin)
	in, err := cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to pipe addr2line stdin: %v", err)
	}
	cmd.Stderr = os.Stderr
	out, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to pipe addr2line stdout: %v", err)
	}
	err = cmd.Start()
	if err != nil {
		return fmt.Errorf("failed to start addr2line: %v", err)
	}
	outb := bufio.NewReader(out)

	// Write all pcs to addr2line.
	// Need to copy pcs to an array, because map iteration order is non-deterministic.
	var pcArray []uint64
	for pc := range pcs {
		pcArray = append(pcArray, pc)
		// pc-1 because addr2line expects a return PC (an address
		// inside the call instruction, not after it).
		_, err := fmt.Fprintf(in, "0x%x\n", pc-1)
		if err != nil {
			return fmt.Errorf("failed to write to addr2line: %v", err)
		}
	}
	// Closing stdin signals EOF so addr2line produces all its output.
	in.Close()

	// Read in answers: addr2line prints two lines per PC,
	// the function name and then file:line.
	for _, pc := range pcArray {
		fn, err := outb.ReadString('\n')
		if err != nil {
			return fmt.Errorf("failed to read from addr2line: %v", err)
		}
		file, err := outb.ReadString('\n')
		if err != nil {
			return fmt.Errorf("failed to read from addr2line: %v", err)
		}
		f := &Frame{PC: pc}
		// Strip the trailing '\n' guaranteed by a successful ReadString.
		f.Fn = fn[:len(fn)-1]
		f.File = file[:len(file)-1]
		// Split "file:line" on the last colon (file paths may contain colons).
		if colon := strings.LastIndex(f.File, ":"); colon != -1 {
			ln, err := strconv.Atoi(f.File[colon+1:])
			if err == nil {
				f.File = f.File[:colon]
				f.Line = ln
			}
		}
		pcs[pc] = f
	}
	cmd.Wait()

	// Replace frames in events array.
	for _, ev := range events {
		for i, f := range ev.Stk {
			ev.Stk[i] = pcs[f.PC]
		}
	}

	return nil
}
// readVal reads unsigned base-128 value from r.
func readVal(r io.Reader, off0 int) (v uint64, off int, err error) {
off = off0
for i := 0; i < 10; i++ {
var buf [1]byte
var n int
n, err = r.Read(buf[:])
if err != nil || n != 1 {
return 0, 0, fmt.Errorf("failed to read trace at offset: read %v, error %v", off0, n, err)
}
off++
v |= uint64(buf[0]&0x7f) << (uint(i) * 7)
if buf[0]&0x80 == 0 {
return
}
}
return 0, 0, fmt.Errorf("bad value at offset 0x%x", off0)
}
// eventList implements sort.Interface, ordering events by timestamp.
type eventList []*Event

// Len returns the number of events in the list.
func (ev eventList) Len() int {
	return len(ev)
}

// Less reports whether the event at i occurred before the event at j.
func (ev eventList) Less(i, j int) bool {
	return ev[i].Ts < ev[j].Ts
}

// Swap exchanges the events at indexes i and j.
func (ev eventList) Swap(i, j int) {
	ev[i], ev[j] = ev[j], ev[i]
}
// Event types in the trace.
// Verbatim copy from src/runtime/trace.go.
// The numeric values are part of the wire format and must not change;
// EvCount must remain the last entry.
const (
	EvNone           = 0  // unused
	EvBatch          = 1  // start of per-P batch of events [pid, timestamp]
	EvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
	EvStack          = 3  // stack [stack id, number of PCs, array of PCs]
	EvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	EvProcStart      = 5  // start of P [timestamp]
	EvProcStop       = 6  // stop of P [timestamp]
	EvGCStart        = 7  // GC start [timestamp, stack id]
	EvGCDone         = 8  // GC done [timestamp]
	EvGCScanStart    = 9  // GC scan start [timestamp]
	EvGCScanDone     = 10 // GC scan done [timestamp]
	EvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
	EvGCSweepDone    = 12 // GC sweep done [timestamp]
	EvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, start PC, stack id]
	EvGoStart        = 14 // goroutine starts running [timestamp, goroutine id]
	EvGoEnd          = 15 // goroutine ends [timestamp]
	EvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
	EvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
	EvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
	EvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
	EvGoBlock        = 20 // goroutine blocks [timestamp, stack]
	EvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, stack]
	EvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
	EvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
	EvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
	EvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	EvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
	EvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
	EvGoSysCall      = 28 // syscall enter [timestamp, stack]
	EvGoSysExit      = 29 // syscall exit [timestamp, goroutine id]
	EvGoSysBlock     = 30 // syscall blocks [timestamp, stack]
	EvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [goroutine id]
	EvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [goroutine id]
	EvHeapAlloc      = 33 // memstats.heap_alloc change [timestamp, heap_alloc]
	EvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
	EvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
	EvCount          = 36
)
// EventDescriptions describes every event type: its human-readable name,
// whether it carries a stack trace, and the names of its arguments
// (the implicit timestamp and stack-id arguments are not listed).
// Indexed by the Ev* constants; an empty Name marks an unused slot.
var EventDescriptions = [EvCount]struct {
	Name  string   // human-readable event name
	Stack bool     // whether the event has an associated stack trace
	Args  []string // names of the explicit arguments
}{
	EvNone:           {"None", false, []string{}},
	EvBatch:          {"Batch", false, []string{"p", "ticks"}},
	EvFrequency:      {"Frequency", false, []string{"freq"}},
	EvStack:          {"Stack", false, []string{"id", "siz"}},
	EvGomaxprocs:     {"Gomaxprocs", true, []string{"procs"}},
	EvProcStart:      {"ProcStart", false, []string{}},
	EvProcStop:       {"ProcStop", false, []string{}},
	EvGCStart:        {"GCStart", true, []string{}},
	EvGCDone:         {"GCDone", false, []string{}},
	EvGCScanStart:    {"GCScanStart", false, []string{}},
	EvGCScanDone:     {"GCScanDone", false, []string{}},
	EvGCSweepStart:   {"GCSweepStart", true, []string{}},
	EvGCSweepDone:    {"GCSweepDone", false, []string{}},
	EvGoCreate:       {"GoCreate", true, []string{"g", "pc"}},
	EvGoStart:        {"GoStart", false, []string{"g"}},
	EvGoEnd:          {"GoEnd", false, []string{}},
	EvGoStop:         {"GoStop", true, []string{}},
	EvGoSched:        {"GoSched", true, []string{}},
	EvGoPreempt:      {"GoPreempt", true, []string{}},
	EvGoSleep:        {"GoSleep", true, []string{}},
	EvGoBlock:        {"GoBlock", true, []string{}},
	EvGoUnblock:      {"GoUnblock", true, []string{"g"}},
	EvGoBlockSend:    {"GoBlockSend", true, []string{}},
	EvGoBlockRecv:    {"GoBlockRecv", true, []string{}},
	EvGoBlockSelect:  {"GoBlockSelect", true, []string{}},
	EvGoBlockSync:    {"GoBlockSync", true, []string{}},
	EvGoBlockCond:    {"GoBlockCond", true, []string{}},
	EvGoBlockNet:     {"GoBlockNet", true, []string{}},
	EvGoSysCall:      {"GoSysCall", true, []string{}},
	EvGoSysExit:      {"GoSysExit", false, []string{"g"}},
	EvGoSysBlock:     {"GoSysBlock", true, []string{}},
	EvGoWaiting:      {"GoWaiting", false, []string{"g"}},
	EvGoInSyscall:    {"GoInSyscall", false, []string{"g"}},
	EvHeapAlloc:      {"HeapAlloc", false, []string{"mem"}},
	EvNextGC:         {"NextGC", false, []string{"mem"}},
	EvTimerGoroutine: {"TimerGoroutine", false, []string{"g"}},
}

View File

@ -25,9 +25,6 @@ import (
)
const (
maxValueLength = 4096
maxHeaderLines = 1024
chunkSize = 4 << 10 // 4 KB chunks
defaultMaxMemory = 32 << 20 // 32 MB
)

View File

@ -302,8 +302,8 @@ func (v Value) Call(in []Value) []Value {
// CallSlice calls the variadic function v with the input arguments in,
// assigning the slice in[len(in)-1] to v's final variadic argument.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]...).
// Call panics if v's Kind is not Func or if v is not variadic.
// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
// CallSlice panics if v's Kind is not Func or if v is not variadic.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.

View File

@ -30,6 +30,19 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
CPUID
CMPL AX, $0
JE nocpuinfo
// Figure out how to serialize RDTSC.
// On Intel processors LFENCE is enough. AMD requires MFENCE.
// Don't know about the rest, so let's do MFENCE.
CMPL BX, $0x756E6547 // "Genu"
JNE notintel
CMPL DX, $0x49656E69 // "ineI"
JNE notintel
CMPL CX, $0x6C65746E // "ntel"
JNE notintel
MOVB $1, runtime·lfenceBeforeRdtsc(SB)
notintel:
MOVL $1, AX
CPUID
MOVL CX, runtime·cpuid_ecx(SB)
@ -868,9 +881,17 @@ TEXT runtime·gogetcallersp(SB),NOSPLIT,$0-8
MOVL AX, ret+4(FP)
RET
// int64 runtime·cputicks(void), so really
// void runtime·cputicks(int64 *ticks)
// func cputicks() int64
TEXT runtime·cputicks(SB),NOSPLIT,$0-8
TESTL $0x4000000, runtime·cpuid_edx(SB) // no sse2, no mfence
JEQ done
CMPB runtime·lfenceBeforeRdtsc(SB), $1
JNE mfence
BYTE $0x0f; BYTE $0xae; BYTE $0xe8 // LFENCE
JMP done
mfence:
BYTE $0x0f; BYTE $0xae; BYTE $0xf0 // MFENCE
done:
RDTSC
MOVL AX, ret_lo+0(FP)
MOVL DX, ret_hi+4(FP)

View File

@ -30,6 +30,19 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
CPUID
CMPQ AX, $0
JE nocpuinfo
// Figure out how to serialize RDTSC.
// On Intel processors LFENCE is enough. AMD requires MFENCE.
// Don't know about the rest, so let's do MFENCE.
CMPL BX, $0x756E6547 // "Genu"
JNE notintel
CMPL DX, $0x49656E69 // "ineI"
JNE notintel
CMPL CX, $0x6C65746E // "ntel"
JNE notintel
MOVB $1, runtime·lfenceBeforeRdtsc(SB)
notintel:
MOVQ $1, AX
CPUID
MOVL CX, runtime·cpuid_ecx(SB)
@ -865,8 +878,15 @@ TEXT runtime·gogetcallersp(SB),NOSPLIT,$0-16
MOVQ AX, ret+8(FP)
RET
// int64 runtime·cputicks(void)
// func cputicks() int64
TEXT runtime·cputicks(SB),NOSPLIT,$0-0
CMPB runtime·lfenceBeforeRdtsc(SB), $1
JNE mfence
BYTE $0x0f; BYTE $0xae; BYTE $0xe8 // LFENCE
JMP done
mfence:
BYTE $0x0f; BYTE $0xae; BYTE $0xf0 // MFENCE
done:
RDTSC
SHLQ $32, DX
ADDQ DX, AX

View File

@ -1,13 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
* Cgo interface.
*/
void runtime·cgocall(void (*fn)(void*), void*);
int32 runtime·cgocall_errno(void (*fn)(void*), void*);
void runtime·cgocallback(void (*fn)(void), void*, uintptr);
void *runtime·cmalloc(uintptr);
void runtime·cfree(void*);

View File

@ -655,7 +655,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
}
if shouldtriggergc() {
gogc(0)
startGC(gcBackgroundMode)
} else if shouldhelpgc && atomicloaduint(&bggc.working) == 1 {
// bggc.lock not taken since race on bggc.working is benign.
// At worse we don't call gchelpwork.

View File

@ -120,7 +120,6 @@ import "unsafe"
const (
_DebugGC = 0
_DebugGCPtrs = false // if true, print trace of every pointer load during GC
_ConcurrentSweep = true
_FinBlockSize = 4 * 1024
_RootData = 0
@ -206,9 +205,7 @@ func shouldtriggergc() bool {
return triggerratio*(int64(memstats.next_gc)-int64(memstats.heap_alloc)) <= int64(memstats.next_gc) && atomicloaduint(&bggc.working) == 0
}
var work workdata
type workdata struct {
var work struct {
full uint64 // lock-free list of full blocks workbuf
empty uint64 // lock-free list of empty blocks workbuf
partial uint64 // lock-free list of partially filled blocks workbuf
@ -226,19 +223,21 @@ type workdata struct {
// GC runs a garbage collection.
func GC() {
gogc(2)
startGC(gcForceBlockMode)
}
// force = 0 - start concurrent GC
// force = 1 - do STW GC regardless of current heap usage
// force = 2 - go STW GC and eager sweep
func gogc(force int32) {
const (
gcBackgroundMode = iota // concurrent GC
gcForceMode // stop-the-world GC now
gcForceBlockMode // stop-the-world GC now and wait for sweep
)
func startGC(mode int) {
// The gc is turned off (via enablegc) until the bootstrap has completed.
// Also, malloc gets called in the guts of a number of libraries that might be
// holding locks. To avoid deadlocks during stoptheworld, don't bother
// trying to run gc while holding a lock. The next mallocgc without a lock
// will do the gc instead.
mp := acquirem()
if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
releasem(mp)
@ -247,20 +246,23 @@ func gogc(force int32) {
releasem(mp)
mp = nil
if force == 0 {
lock(&bggc.lock)
if !bggc.started {
bggc.working = 1
bggc.started = true
go backgroundgc()
} else if bggc.working == 0 {
bggc.working = 1
ready(bggc.g)
}
unlock(&bggc.lock)
} else {
gcwork(force)
if mode != gcBackgroundMode {
// special synchronous cases
gc(mode)
return
}
// trigger concurrent GC
lock(&bggc.lock)
if !bggc.started {
bggc.working = 1
bggc.started = true
go backgroundgc()
} else if bggc.working == 0 {
bggc.working = 1
ready(bggc.g)
}
unlock(&bggc.lock)
}
// State of the background concurrent GC goroutine.
@ -276,15 +278,15 @@ var bggc struct {
func backgroundgc() {
bggc.g = getg()
for {
gcwork(0)
gc(gcBackgroundMode)
lock(&bggc.lock)
bggc.working = 0
goparkunlock(&bggc.lock, "Concurrent GC wait", traceEvGoBlock)
}
}
func gcwork(force int32) {
func gc(mode int) {
// Ok, we're doing it! Stop everybody else
semacquire(&worldsema, false)
// Pick up the remaining unswept/not being swept spans concurrently
@ -292,13 +294,11 @@ func gcwork(force int32) {
sweep.nbgsweep++
}
// Ok, we're doing it! Stop everybody else
mp := acquirem()
mp.preemptoff = "gcing"
releasem(mp)
gctimer.count++
if force == 0 {
if mode == gcBackgroundMode {
gctimer.cycle.sweepterm = nanotime()
}
@ -307,31 +307,40 @@ func gcwork(force int32) {
traceGCStart()
}
// Pick up the remaining unswept/not being swept spans before we STW
for gosweepone() != ^uintptr(0) {
sweep.nbgsweep++
}
systemstack(stoptheworld)
systemstack(finishsweep_m) // finish sweep before we start concurrent scan.
if force == 0 { // Do as much work concurrently as possible
gcphase = _GCscan
systemstack(starttheworld)
gctimer.cycle.scan = nanotime()
// Do a concurrent heap scan before we stop the world.
systemstack(gcscan_m)
gctimer.cycle.installmarkwb = nanotime()
systemstack(stoptheworld)
systemstack(gcinstallmarkwb)
systemstack(harvestwbufs)
systemstack(starttheworld)
gctimer.cycle.mark = nanotime()
systemstack(gcmark_m)
gctimer.cycle.markterm = nanotime()
systemstack(stoptheworld)
systemstack(gcinstalloffwb_m)
if mode == gcBackgroundMode { // Do as much work concurrently as possible
systemstack(func() {
gcphase = _GCscan
// Concurrent scan.
starttheworld()
gctimer.cycle.scan = nanotime()
gcscan_m()
gctimer.cycle.installmarkwb = nanotime()
// Sync.
stoptheworld()
gcphase = _GCmark
harvestwbufs()
// Concurrent mark.
starttheworld()
gctimer.cycle.mark = nanotime()
var gcw gcWork
gcDrain(&gcw)
gcw.dispose()
// Begin mark termination.
gctimer.cycle.markterm = nanotime()
stoptheworld()
gcphase = _GCoff
})
} else {
// For non-concurrent GC (force != 0) g stack have not been scanned so
// set gcscanvalid such that mark termination scans all stacks.
// For non-concurrent GC (mode != gcBackgroundMode)
// g stack have not been scanned so set gcscanvalid
// such that mark termination scans all stacks.
// No races here since we are in a STW phase.
for _, gp := range allgs {
gp.gcworkdone = false // set to true in gcphasework
@ -341,36 +350,46 @@ func gcwork(force int32) {
startTime := nanotime()
if mp != acquirem() {
throw("gogc: rescheduled")
throw("gcwork: rescheduled")
}
// TODO(rsc): Should the concurrent GC clear pools earlier?
clearpools()
_g_ := getg()
_g_.m.traceback = 2
gp := _g_.m.curg
casgstatus(gp, _Grunning, _Gwaiting)
gp.waitreason = "garbage collection"
// Run gc on the g0 stack. We do this so that the g stack
// we're currently running on will no longer change. Cuts
// the root set down a bit (g0 stacks are not scanned, and
// we don't need to scan gc's internal state). We also
// need to switch to g0 so we can shrink the stack.
n := 1
if debug.gctrace > 1 {
n = 2
}
eagersweep := force >= 2
for i := 0; i < n; i++ {
if i > 0 {
// refresh start time if doing a second GC
startTime = nanotime()
}
// switch to g0, call gc, then switch back
systemstack(func() {
gc_m(startTime, eagersweep)
})
}
systemstack(func() {
gccheckmark_m(startTime, eagersweep)
gcMark(startTime)
if debug.gccheckmark > 0 {
// Run a full stop-the-world mark using checkmark bits,
// to check that we didn't forget to mark anything during
// the concurrent mark process.
initCheckmarks()
gcMark(startTime)
clearCheckmarks()
}
gcSweep(mode)
if debug.gctrace > 1 {
startTime = nanotime()
finishsweep_m()
gcMark(startTime)
gcSweep(mode)
}
})
_g_.m.traceback = 0
casgstatus(gp, _Gwaiting, _Grunning)
if trace.enabled {
traceGCDone()
traceGoStart()
@ -379,13 +398,13 @@ func gcwork(force int32) {
// all done
mp.preemptoff = ""
if force == 0 {
if mode == gcBackgroundMode {
gctimer.cycle.sweep = nanotime()
}
semrelease(&worldsema)
if force == 0 {
if mode == gcBackgroundMode {
if gctimer.verbose > 1 {
GCprinttimes()
} else if gctimer.verbose > 0 {
@ -405,109 +424,24 @@ func gcwork(force int32) {
}
}
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all go routines see the new barrier.
//go:nowritebarrier
func gcinstalloffwb_m() {
gcphase = _GCoff
}
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all go routines see the new barrier.
//go:nowritebarrier
func gcinstallmarkwb() {
gcphase = _GCmark
}
// Mark all objects that are known about.
// This is the concurrent mark phase.
//go:nowritebarrier
func gcmark_m() {
var gcw gcWork
gcDrain(&gcw)
gcw.dispose()
// TODO add another harvestwbuf and reset work.nwait=0, work.ndone=0, and work.nproc=1
// and repeat the above gcDrain.
}
// Called from malloc.go using systemstack.
// The world is stopped. Rerun the scan and mark phases
// using the bitMarkedCheck bit instead of the
// bitMarked bit. If the marking encounters an
// bitMarked bit that is not set then we throw.
//go:nowritebarrier
func gccheckmark_m(startTime int64, eagersweep bool) {
if debug.gccheckmark == 0 {
return
}
if checkmarkphase {
throw("gccheckmark_m, entered with checkmarkphase already true")
}
checkmarkphase = true
initCheckmarks()
gc_m(startTime, eagersweep) // turns off checkmarkphase + calls clearcheckmarkbits
}
// Called from malloc.go using systemstack, stopping and starting the world handled in caller.
//go:nowritebarrier
func gc_m(start_time int64, eagersweep bool) {
_g_ := getg()
gp := _g_.m.curg
casgstatus(gp, _Grunning, _Gwaiting)
gp.waitreason = "garbage collection"
gc(start_time, eagersweep)
casgstatus(gp, _Gwaiting, _Grunning)
}
// gcMark runs the mark (or, for concurrent GC, mark termination)
// STW is in effect at this point.
//TODO go:nowritebarrier
func gc(start_time int64, eagersweep bool) {
if _DebugGCPtrs {
print("GC start\n")
}
gcphase = _GCmarktermination
func gcMark(start_time int64) {
if debug.allocfreetrace > 0 {
tracegc()
}
_g_ := getg()
_g_.m.traceback = 2
t0 := start_time
work.tstart = start_time
gcphase = _GCmarktermination
var t1 int64
if debug.gctrace > 0 {
t1 = nanotime()
}
if !checkmarkphase {
// TODO(austin) This is a noop beceause we should
// already have swept everything to the current
// sweepgen.
finishsweep_m() // skip during checkmark debug phase.
}
// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
// resizing/freeing allspans.
// New spans can be created while GC progresses, but they are not garbage for
// this round:
// - new stack spans can be created even while the world is stopped.
// - new malloc spans can be created during the concurrent sweep
// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
lock(&mheap_.lock)
// Free the old cached sweep array if necessary.
if work.spans != nil && &work.spans[0] != &h_allspans[0] {
sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
}
// Cache the current array for marking.
mheap_.gcspans = mheap_.allspans
work.spans = h_allspans
unlock(&mheap_.lock)
gcCopySpans()
work.nwait = 0
work.ndone = 0
@ -615,67 +549,63 @@ func gc(start_time int64, eagersweep bool) {
sweep.nbgsweep = 0
sweep.npausesweep = 0
}
}
if debug.gccheckmark > 0 {
if !checkmarkphase {
// first half of two-pass; don't set up sweep
return
}
checkmarkphase = false // done checking marks
clearCheckmarks()
}
func gcSweep(mode int) {
gcCopySpans()
// See the comment in the beginning of this function as to why we need the following.
// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
lock(&mheap_.lock)
// Free the old cached mark array if necessary.
if work.spans != nil && &work.spans[0] != &h_allspans[0] {
sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
}
// Cache the current array for sweeping.
mheap_.gcspans = mheap_.allspans
mheap_.sweepgen += 2
mheap_.sweepdone = 0
work.spans = h_allspans
sweep.spanidx = 0
unlock(&mheap_.lock)
if _ConcurrentSweep && !eagersweep {
lock(&gclock)
if !sweep.started {
go bgsweep()
sweep.started = true
} else if sweep.parked {
sweep.parked = false
ready(sweep.g)
}
unlock(&gclock)
} else {
if !_ConcurrentSweep || mode == gcForceBlockMode {
// Special case synchronous sweep.
// Sweep all spans eagerly.
for sweepone() != ^uintptr(0) {
sweep.npausesweep++
}
// Do an additional mProf_GC, because all 'free' events are now real as well.
mProf_GC()
mProf_GC()
return
}
// Background sweep.
lock(&sweep.lock)
if !sweep.started {
go bgsweep()
sweep.started = true
} else if sweep.parked {
sweep.parked = false
ready(sweep.g)
}
unlock(&sweep.lock)
mProf_GC()
_g_.m.traceback = 0
}
if _DebugGCPtrs {
print("GC end\n")
func gcCopySpans() {
// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
// resizing/freeing allspans.
// New spans can be created while GC progresses, but they are not garbage for
// this round:
// - new stack spans can be created even while the world is stopped.
// - new malloc spans can be created during the concurrent sweep
// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
lock(&mheap_.lock)
// Free the old cached mark array if necessary.
if work.spans != nil && &work.spans[0] != &h_allspans[0] {
sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
}
// Cache the current array for sweeping.
mheap_.gcspans = mheap_.allspans
work.spans = h_allspans
unlock(&mheap_.lock)
}
// Hooks for other packages
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
gogc(2) // force GC and do eager sweep
systemstack(scavenge_m)
}
var poolcleanup func()
//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup

View File

@ -86,7 +86,7 @@ func markroot(desc *parfor, i uint32) {
if s.state != mSpanInUse {
continue
}
if !checkmarkphase && s.sweepgen != sg {
if !useCheckmark && s.sweepgen != sg {
// sweepgen was updated (+2) during non-checkmark GC pass
print("sweep ", s.sweepgen, " ", sg, "\n")
throw("gc: unswept span")
@ -458,7 +458,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWorkProducer) {
}
if bits&typePointer != typePointer {
print("gc checkmarkphase=", checkmarkphase, " b=", hex(b), " ptrmask=", ptrmask, "\n")
print("gc useCheckmark=", useCheckmark, " b=", hex(b), " ptrmask=", ptrmask, "\n")
throw("unexpected garbage collection bits")
}
@ -470,7 +470,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWorkProducer) {
continue
}
if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && checkmarkphase {
if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && useCheckmark {
checkwbshadow((*uintptr)(unsafe.Pointer(b + i)))
}
@ -528,7 +528,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWorkProducer) {
throw("greyobject: obj not pointer-aligned")
}
if checkmarkphase {
if useCheckmark {
if !hbits.isMarked() {
print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
@ -591,7 +591,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWorkProducer) {
hbits.setMarked()
}
if !checkmarkphase && hbits.typeBits() == typeDead {
if !useCheckmark && hbits.typeBits() == typeDead {
return // noscan object
}
@ -611,7 +611,7 @@ func gcmarknewobject_m(obj uintptr) {
if gcphase != _GCmarktermination {
throw("marking new object while not in mark termination phase")
}
if checkmarkphase { // The world should be stopped so this should not happen.
if useCheckmark { // The world should be stopped so this should not happen.
throw("gcmarknewobject called while doing checkmark")
}
@ -636,13 +636,14 @@ func gcmarknewobject_m(obj uintptr) {
// there are no more pointers in the object. This information is held
// in the second nibble.
// When marking an object if the bool checkmarkphase is true one uses the above
// encoding, otherwise one uses the bitMarked bit in the lower two bits
// of the nibble.
var checkmarkphase = false
// If useCheckmark is true, marking of an object uses the
// checkmark bits (encoding above) instead of the standard
// mark bits.
var useCheckmark = false
//go:nowritebarrier
func initCheckmarks() {
useCheckmark = true
for _, s := range work.spans {
if s.state == _MSpanInUse {
heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
@ -651,6 +652,7 @@ func initCheckmarks() {
}
func clearCheckmarks() {
useCheckmark = false
for _, s := range work.spans {
if s.state == _MSpanInUse {
heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())

View File

@ -11,8 +11,8 @@ import "unsafe"
var sweep sweepdata
// State of background sweep.
// Protected by gclock.
type sweepdata struct {
lock mutex
g *g
parked bool
started bool
@ -23,8 +23,6 @@ type sweepdata struct {
npausesweep uint32
}
var gclock mutex
//go:nowritebarrier
func finishsweep_m() {
// The world is stopped so we should be able to complete the sweeps
@ -51,16 +49,16 @@ func bgsweep() {
sweep.nbgsweep++
Gosched()
}
lock(&gclock)
lock(&sweep.lock)
if !gosweepdone() {
// This can happen if a GC runs between
// gosweepone returning ^0 above
// and the lock being acquired.
unlock(&gclock)
unlock(&sweep.lock)
continue
}
sweep.parked = true
goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock)
}
}
@ -145,10 +143,6 @@ func mSpan_EnsureSwept(s *mspan) {
// caller takes care of it.
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
if checkmarkphase {
throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
}
// It's critical that we enter this function with preemption disabled,
// GC must not start while we are in the middle of this function.
_g_ := getg()

View File

@ -747,8 +747,10 @@ func mHeap_Scavenge(k int32, now, limit uint64) {
}
}
func scavenge_m() {
mHeap_Scavenge(-1, ^uint64(0), 0)
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
startGC(gcForceBlockMode)
systemstack(func() { mHeap_Scavenge(-1, ^uint64(0), 0) })
}
// Initialize a new span with the given start and npages.

View File

@ -1,656 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pprof_test
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"sort"
"strconv"
"strings"
)
// Event describes one event in the trace.
// Events are produced by parseEvents from RawEvents and then linked
// together and verified by postProcessTrace.
type Event struct {
	off   int       // offset in input file (for debugging and error reporting)
	typ   byte      // one of traceEv*
	ts    int64     // timestamp in nanoseconds
	p     int       // P on which the event happened (can be one of timerP, netpollP, syscallP)
	g     uint64    // G on which the event happened
	stkID uint64    // unique stack ID
	stk   []*Frame  // stack trace (can be empty)
	args  [2]uint64 // event-type-specific arguments
	// linked event (can be nil), depends on event type:
	// for GCStart: the GCDone
	// for GCScanStart: the GCScanDone
	// for GCSweepStart: the GCSweepDone
	// for GoCreate: first GoStart of the created goroutine
	// for GoStart: the associated GoEnd, GoBlock or other blocking event
	// for GoSched/GoPreempt: the next GoStart
	// for GoBlock and other blocking events: the unblock event
	// for GoUnblock: the associated GoStart
	// for blocking GoSysCall: the associated GoSysExit
	// for GoSysExit: the next GoStart
	link *Event
}
// Frame is a frame in stack traces.
// Only pc is set during parsing; fn, file and line are filled in
// later by symbolizeTrace.
type Frame struct {
	pc   uint64 // program counter
	fn   string // function name
	file string // source file
	line int    // source line
}
// Special P identifiers for events that do not happen on a real P.
// parseEvents moves timer unblocks and syscall exits onto these fake Ps.
const (
	// Special P identifiers:
	timerP   = 1000000 + iota // depicts timer unblocks
	netpollP                  // depicts network unblocks
	syscallP                  // depicts returns from syscalls
)
// parseTrace parses, post-processes and verifies the trace read from r.
// It runs the three phases in order: wire-format decoding (readTrace),
// per-event-type interpretation (parseEvents) and inter-event
// consistency checking (postProcessTrace).
func parseTrace(r io.Reader) ([]*Event, error) {
	raw, err := readTrace(r)
	if err != nil {
		return nil, err
	}
	events, err := parseEvents(raw)
	if err != nil {
		return nil, err
	}
	if err := postProcessTrace(events); err != nil {
		return nil, err
	}
	return events, nil
}
// RawEvent is a helper type used during parsing.
// It holds an event exactly as decoded from the wire format,
// before per-event-type argument interpretation.
type RawEvent struct {
	off  int      // offset in input file (for error reporting)
	typ  byte     // one of traceEv*
	args []uint64 // raw, uninterpreted arguments
}
// readTrace does wire-format parsing and verification.
// It does not care about specific event types and argument meaning.
// The wire format is: an 8-byte "gotrace\x00" header, then a sequence of
// events, each a 1-byte header (low 6 bits: type; top 2 bits: narg-1)
// followed by base-128 varint arguments (see readVal).
// Returns the decoded raw events or an error describing the first
// malformed byte (with its file offset).
func readTrace(r io.Reader) ([]RawEvent, error) {
	// Read and validate trace header.
	// io.ReadFull is required here: a plain r.Read may legally return
	// fewer than 8 bytes even when more data is available.
	var buf [8]byte
	off, err := io.ReadFull(r, buf[:])
	if off != 8 || err != nil {
		return nil, fmt.Errorf("failed to read header: read %v, err %v", off, err)
	}
	if !bytes.Equal(buf[:], []byte("gotrace\x00")) {
		return nil, fmt.Errorf("not a trace file")
	}

	// Read events.
	var events []RawEvent
	for {
		// Read event type and number of arguments (1 byte).
		off0 := off
		n, err := io.ReadFull(r, buf[:1])
		if err == io.EOF {
			// Clean end of stream between events.
			break
		}
		if err != nil || n != 1 {
			return nil, fmt.Errorf("failed to read trace at offset 0x%x: n=%v err=%v", off0, n, err)
		}
		off += n
		typ := buf[0] << 2 >> 2 // clear the top two bits: event type
		narg := buf[0]>>6 + 1   // top two bits encode narg-1
		ev := RawEvent{typ: typ, off: off0}
		if narg <= 3 {
			for i := 0; i < int(narg); i++ {
				var v uint64
				v, off, err = readVal(r, off)
				if err != nil {
					return nil, err
				}
				ev.args = append(ev.args, v)
			}
		} else {
			// If narg == 4, the first value is length of the event in bytes.
			var v uint64
			v, off, err = readVal(r, off)
			if err != nil {
				return nil, err
			}
			evLen := v
			off1 := off
			for evLen > uint64(off-off1) {
				v, off, err = readVal(r, off)
				if err != nil {
					return nil, err
				}
				ev.args = append(ev.args, v)
			}
			if evLen != uint64(off-off1) {
				return nil, fmt.Errorf("event has wrong length at offset 0x%x: want %v, got %v", off0, evLen, off-off1)
			}
		}
		events = append(events, ev)
	}
	return events, nil
}
// parseEvents transforms raw events into events.
// It analyzes and verifies per-event-type arguments, attaches stack
// traces, sorts events by time and converts CPU ticks to nanoseconds
// relative to the first event.
func parseEvents(rawEvents []RawEvent) (events []*Event, err error) {
	var ticksPerSec, lastTs int64
	var lastG, timerGoid uint64
	var lastP int
	lastGs := make(map[int]uint64) // last goroutine running on P
	stacks := make(map[uint64][]*Frame)
	for _, raw := range rawEvents {
		if raw.typ == traceEvNone || raw.typ >= traceEvCount {
			err = fmt.Errorf("unknown event type %v at offset 0x%x", raw.typ, raw.off)
			return
		}
		desc := evDescriptions[raw.typ]
		if desc.name == "" {
			err = fmt.Errorf("missing description for event type %v", raw.typ)
			return
		}
		// Verify argument count for everything but stacks (which are
		// variable-length and checked below).
		if raw.typ != traceEvStack {
			narg := len(desc.args)
			if desc.stack {
				narg++
			}
			if raw.typ != traceEvBatch && raw.typ != traceEvFrequency && raw.typ != traceEvTimerGoroutine {
				narg++ // timestamp
			}
			if len(raw.args) != narg {
				err = fmt.Errorf("%v has wrong number of arguments at offset 0x%x: want %v, got %v",
					desc.name, raw.off, narg, len(raw.args))
				return
			}
		}
		switch raw.typ {
		case traceEvBatch:
			// Switch to another P's batch; remember which G was last
			// running on the previous P so we can resume its context.
			lastGs[lastP] = lastG
			lastP = int(raw.args[0])
			lastG = lastGs[lastP]
			lastTs = int64(raw.args[1])
		case traceEvFrequency:
			ticksPerSec = int64(raw.args[0])
			if ticksPerSec <= 0 {
				err = fmt.Errorf("traceEvFrequency contains invalid frequency %v at offset 0x%x",
					ticksPerSec, raw.off)
				return
			}
		case traceEvTimerGoroutine:
			timerGoid = raw.args[0]
		case traceEvStack:
			if len(raw.args) < 2 {
				err = fmt.Errorf("traceEvStack has wrong number of arguments at offset 0x%x: want at least 2, got %v",
					raw.off, len(raw.args))
				return
			}
			size := raw.args[1]
			if size > 1000 {
				err = fmt.Errorf("traceEvStack has bad number of frames at offset 0x%x: %v",
					raw.off, size)
				return
			}
			id := raw.args[0]
			if id != 0 && size > 0 {
				stk := make([]*Frame, size)
				for i := 0; i < int(size); i++ {
					stk[i] = &Frame{pc: raw.args[i+2]}
				}
				stacks[id] = stk
			}
		default:
			// Ordinary event: timestamps are delta-encoded per batch.
			e := &Event{off: raw.off, typ: raw.typ, p: lastP, g: lastG}
			e.ts = lastTs + int64(raw.args[0])
			lastTs = e.ts
			for i := range desc.args {
				e.args[i] = raw.args[i+1]
			}
			if desc.stack {
				e.stkID = raw.args[len(desc.args)+1]
			}
			switch raw.typ {
			case traceEvGoStart:
				lastG = e.args[0]
				e.g = lastG
			case traceEvGCStart, traceEvGCDone, traceEvGCScanStart, traceEvGCScanDone:
				// GC events are not attributed to any goroutine.
				e.g = 0
			case traceEvGoEnd, traceEvGoStop, traceEvGoSched, traceEvGoPreempt,
				traceEvGoSleep, traceEvGoBlock, traceEvGoBlockSend, traceEvGoBlockRecv,
				traceEvGoBlockSelect, traceEvGoBlockSync, traceEvGoBlockCond, traceEvGoBlockNet,
				traceEvGoSysBlock:
				// The G stops running on this P.
				lastG = 0
			}
			events = append(events, e)
		}
	}
	// Attach stack traces.
	for _, ev := range events {
		if ev.stkID != 0 {
			ev.stk = stacks[ev.stkID]
		}
	}
	// Sort by time and translate cpu ticks to real time.
	sort.Sort(EventList(events))
	if ticksPerSec == 0 {
		err = fmt.Errorf("no traceEvFrequency event")
		return
	}
	// NOTE(review): events[0] panics if the trace contains a Frequency
	// event but no ordinary events — confirm this cannot happen.
	minTs := events[0].ts
	for _, ev := range events {
		ev.ts = (ev.ts - minTs) * 1e9 / ticksPerSec
		// Move timers and syscalls to separate fake Ps.
		if timerGoid != 0 && ev.g == timerGoid && ev.typ == traceEvGoUnblock {
			ev.p = timerP
		}
		if ev.typ == traceEvGoSysExit {
			ev.p = syscallP
			ev.g = ev.args[0]
		}
	}
	return
}
// postProcessTrace does inter-event verification and information restoration.
// The resulting trace is guaranteed to be consistent
// (for example, a P does not run two Gs at the same time, or a G is indeed
// blocked before an unblock event).
// It also sets Event.link on paired events (start/done, block/unblock, etc.).
func postProcessTrace(events []*Event) error {
	// Goroutine lifecycle states tracked by the verifier.
	const (
		gDead = iota
		gRunnable
		gRunning
		gWaiting
	)
	// gdesc tracks the state of one goroutine.
	type gdesc struct {
		state   int
		ev      *Event // event that put the goroutine into its current state
		evStart *Event // GoStart of the current running interval
	}
	// pdesc tracks the state of one P and its in-flight GC phases.
	type pdesc struct {
		running bool
		g       uint64
		evGC    *Event
		evScan  *Event
		evSweep *Event
	}
	gs := make(map[uint64]gdesc)
	ps := make(map[int]pdesc)
	gs[0] = gdesc{state: gRunning} // g0 is considered running from the start
	// checkRunning verifies that ev.g is running and is the G that ev.p runs.
	checkRunning := func(p pdesc, g gdesc, ev *Event) error {
		name := evDescriptions[ev.typ].name
		if g.state != gRunning {
			return fmt.Errorf("g %v is not running while %v (offset %v, time %v)", ev.g, name, ev.off, ev.ts)
		}
		if p.g != ev.g {
			return fmt.Errorf("p %v is not running g %v while %v (offset %v, time %v)", ev.p, ev.g, name, ev.off, ev.ts)
		}
		return nil
	}
	for _, ev := range events {
		g := gs[ev.g]
		p := ps[ev.p]
		switch ev.typ {
		case traceEvProcStart:
			if p.running {
				return fmt.Errorf("p %v is running before start (offset %v, time %v)", ev.p, ev.off, ev.ts)
			}
			p.running = true
		case traceEvProcStop:
			if !p.running {
				return fmt.Errorf("p %v is not running before stop (offset %v, time %v)", ev.p, ev.off, ev.ts)
			}
			if p.g != 0 {
				return fmt.Errorf("p %v is running a goroutine %v during stop (offset %v, time %v)", ev.p, p.g, ev.off, ev.ts)
			}
			p.running = false
		case traceEvGCStart:
			if p.evGC != nil {
				return fmt.Errorf("previous GC is not ended before a new one (offset %v, time %v)", ev.off, ev.ts)
			}
			p.evGC = ev
		case traceEvGCDone:
			if p.evGC == nil {
				return fmt.Errorf("bogus GC end (offset %v, time %v)", ev.off, ev.ts)
			}
			p.evGC.link = ev
			p.evGC = nil
		case traceEvGCScanStart:
			if p.evScan != nil {
				return fmt.Errorf("previous scanning is not ended before a new one (offset %v, time %v)", ev.off, ev.ts)
			}
			p.evScan = ev
		case traceEvGCScanDone:
			if p.evScan == nil {
				return fmt.Errorf("bogus scanning end (offset %v, time %v)", ev.off, ev.ts)
			}
			p.evScan.link = ev
			p.evScan = nil
		case traceEvGCSweepStart:
			if p.evSweep != nil {
				return fmt.Errorf("previous sweeping is not ended before a new one (offset %v, time %v)", ev.off, ev.ts)
			}
			p.evSweep = ev
		case traceEvGCSweepDone:
			if p.evSweep == nil {
				return fmt.Errorf("bogus sweeping end (offset %v, time %v)", ev.off, ev.ts)
			}
			p.evSweep.link = ev
			p.evSweep = nil
		case traceEvGoWaiting:
			// Initial state: the goroutine was already blocked when
			// tracing started.
			g1 := gs[ev.args[0]]
			if g1.state != gRunnable {
				return fmt.Errorf("g %v is not runnable before traceEvGoWaiting (offset %v, time %v)", ev.args[0], ev.off, ev.ts)
			}
			g1.state = gWaiting
			gs[ev.args[0]] = g1
		case traceEvGoInSyscall:
			// this case is intentionally left blank
		case traceEvGoCreate:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			if _, ok := gs[ev.args[0]]; ok {
				return fmt.Errorf("g %v already exists (offset %v, time %v)", ev.args[0], ev.off, ev.ts)
			}
			gs[ev.args[0]] = gdesc{state: gRunnable, ev: ev}
		case traceEvGoStart:
			if g.state != gRunnable {
				return fmt.Errorf("g %v is not runnable before start (offset %v, time %v)", ev.g, ev.off, ev.ts)
			}
			if p.g != 0 {
				return fmt.Errorf("p %v is already running g %v while start g %v (offset %v, time %v)", ev.p, p.g, ev.g, ev.off, ev.ts)
			}
			g.state = gRunning
			g.evStart = ev
			p.g = ev.g
			if g.ev != nil {
				if g.ev.typ == traceEvGoCreate {
					// +1 because symbolizer expects return pc.
					ev.stk = []*Frame{&Frame{pc: g.ev.args[1] + 1}}
				}
				g.ev.link = ev
				g.ev = nil
			}
		case traceEvGoEnd, traceEvGoStop:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.evStart.link = ev
			g.evStart = nil
			g.state = gDead
			p.g = 0
		case traceEvGoSched, traceEvGoPreempt:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.state = gRunnable
			g.evStart.link = ev
			g.evStart = nil
			p.g = 0
			g.ev = ev
		case traceEvGoUnblock:
			if g.state != gRunning {
				return fmt.Errorf("g %v is not running while unpark (offset %v, time %v)", ev.g, ev.off, ev.ts)
			}
			if ev.p != timerP && p.g != ev.g {
				return fmt.Errorf("p %v is not running g %v while unpark (offset %v, time %v)", ev.p, ev.g, ev.off, ev.ts)
			}
			g1 := gs[ev.args[0]]
			if g1.state != gWaiting {
				return fmt.Errorf("g %v is not waiting before unpark (offset %v, time %v)", ev.args[0], ev.off, ev.ts)
			}
			if g1.ev != nil && g1.ev.typ == traceEvGoBlockNet && ev.p != timerP {
				// Unblock of a network-blocked goroutine is attributed
				// to the fake netpoll P.
				ev.p = netpollP
			}
			if g1.ev != nil {
				g1.ev.link = ev
			}
			g1.state = gRunnable
			g1.ev = ev
			gs[ev.args[0]] = g1
		case traceEvGoSysCall:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.ev = ev
		case traceEvGoSysBlock:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.state = gRunnable
			g.evStart.link = ev
			g.evStart = nil
			p.g = 0
		case traceEvGoSysExit:
			if g.state != gRunnable {
				return fmt.Errorf("g %v is not runnable during syscall exit (offset %v, time %v)", ev.g, ev.off, ev.ts)
			}
			if g.ev != nil && g.ev.typ == traceEvGoSysCall {
				g.ev.link = ev
			}
			g.ev = ev
		case traceEvGoSleep, traceEvGoBlock, traceEvGoBlockSend, traceEvGoBlockRecv,
			traceEvGoBlockSelect, traceEvGoBlockSync, traceEvGoBlockCond, traceEvGoBlockNet:
			if err := checkRunning(p, g, ev); err != nil {
				return err
			}
			g.state = gWaiting
			g.ev = ev
			g.evStart.link = ev
			g.evStart = nil
			p.g = 0
		}

		gs[ev.g] = g
		ps[ev.p] = p
	}
	return nil
}
// symbolizeTrace attaches func/file/line info to stack traces.
// bin is the path to the binary the trace was collected from; symbol
// resolution is delegated to an external "go tool addr2line" process.
func symbolizeTrace(events []*Event, bin string) error {
	// First, collect and dedup all pcs.
	pcs := make(map[uint64]*Frame)
	for _, ev := range events {
		for _, f := range ev.stk {
			pcs[f.pc] = nil
		}
	}

	// Start addr2line.
	cmd := exec.Command("go", "tool", "addr2line", bin)
	in, err := cmd.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to pipe addr2line stdin: %v", err)
	}
	out, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to pipe addr2line stdout: %v", err)
	}
	err = cmd.Start()
	if err != nil {
		return fmt.Errorf("failed to start addr2line: %v", err)
	}
	outb := bufio.NewReader(out)

	// Write all pcs to addr2line.
	// Need to copy pcs to an array, because map iteration order is non-deterministic.
	var pcArray []uint64
	for pc := range pcs {
		pcArray = append(pcArray, pc)
		// pc-1: pcs here are return addresses; addr2line is queried with
		// the call instruction's address — presumably, confirm against
		// the "+1" in postProcessTrace's GoCreate handling.
		_, err := fmt.Fprintf(in, "0x%x\n", pc-1)
		if err != nil {
			return fmt.Errorf("failed to write to addr2line: %v", err)
		}
	}
	in.Close() // closing stdin lets addr2line finish and flush its output

	// Read in answers: addr2line prints two lines per pc
	// (function name, then "file:line").
	for _, pc := range pcArray {
		fn, err := outb.ReadString('\n')
		if err != nil {
			return fmt.Errorf("failed to read from addr2line: %v", err)
		}
		file, err := outb.ReadString('\n')
		if err != nil {
			return fmt.Errorf("failed to read from addr2line: %v", err)
		}
		f := &Frame{pc: pc}
		f.fn = fn[:len(fn)-1]     // strip trailing newline
		f.file = file[:len(file)-1]
		if colon := strings.LastIndex(f.file, ":"); colon != -1 {
			ln, err := strconv.Atoi(f.file[colon+1:])
			if err == nil {
				f.file = f.file[:colon]
				f.line = ln
			}
		}
		pcs[pc] = f
	}
	// NOTE(review): Wait error is deliberately ignored; symbolization
	// already succeeded by this point.
	cmd.Wait()

	// Replace frames in events array.
	for _, ev := range events {
		for i, f := range ev.stk {
			ev.stk[i] = pcs[f.pc]
		}
	}
	return nil
}
// readVal reads unsigned base-128 value from r.
func readVal(r io.Reader, off0 int) (v uint64, off int, err error) {
off = off0
for i := 0; i < 10; i++ {
var buf [1]byte
var n int
n, err = r.Read(buf[:])
if err != nil || n != 1 {
return 0, 0, fmt.Errorf("failed to read trace at offset: read %v, error %v", off0, n, err)
}
off++
v |= uint64(buf[0]&0x7f) << (uint(i) * 7)
if buf[0]&0x80 == 0 {
return
}
}
return 0, 0, fmt.Errorf("bad value at offset 0x%x", off0)
}
// EventList implements sort.Interface, ordering events by timestamp.
type EventList []*Event

// Len returns the number of events.
func (l EventList) Len() int {
	return len(l)
}

// Less orders events by ascending timestamp.
func (l EventList) Less(i, j int) bool {
	return l[i].ts < l[j].ts
}

// Swap exchanges the events at indexes i and j.
func (l EventList) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}
// Event types in the trace.
// Verbatim copy from src/runtime/trace.go — must be kept in sync with
// the runtime's encoder.
const (
	traceEvNone           = 0  // unused
	traceEvBatch          = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack          = 3  // stack [stack id, number of PCs, array of PCs]
	traceEvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart      = 5  // start of P [timestamp]
	traceEvProcStop       = 6  // stop of P [timestamp]
	traceEvGCStart        = 7  // GC start [timestamp, stack id]
	traceEvGCDone         = 8  // GC done [timestamp]
	traceEvGCScanStart    = 9  // GC scan start [timestamp]
	traceEvGCScanDone     = 10 // GC scan done [timestamp]
	traceEvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone    = 12 // GC sweep done [timestamp]
	traceEvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, start PC, stack id]
	traceEvGoStart        = 14 // goroutine starts running [timestamp, goroutine id]
	traceEvGoEnd          = 15 // goroutine ends [timestamp]
	traceEvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock        = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, stack]
	traceEvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall      = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit      = 29 // syscall exit [timestamp, goroutine id]
	traceEvGoSysBlock     = 30 // syscall blocks [timestamp, stack]
	traceEvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [goroutine id]
	traceEvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [goroutine id]
	traceEvHeapAlloc      = 33 // memstats.heap_alloc change [timestamp, heap_alloc]
	traceEvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
	traceEvCount          = 36
)
// evDescriptions maps each event type to its name, whether it carries a
// trailing stack ID, and the names of its non-timestamp arguments.
// Used by parseEvents to validate argument counts and by error messages.
var evDescriptions = [traceEvCount]struct {
	name  string   // human-readable event name
	stack bool     // true if the event carries a stack ID argument
	args  []string // names of the event's arguments (timestamp excluded)
}{
	traceEvNone:           {"None", false, []string{}},
	traceEvBatch:          {"Batch", false, []string{"p", "ticks"}},
	traceEvFrequency:      {"Frequency", false, []string{"freq"}},
	traceEvStack:          {"Stack", false, []string{"id", "siz"}},
	traceEvGomaxprocs:     {"Gomaxprocs", true, []string{"procs"}},
	traceEvProcStart:      {"ProcStart", false, []string{}},
	traceEvProcStop:       {"ProcStop", false, []string{}},
	traceEvGCStart:        {"GCStart", true, []string{}},
	traceEvGCDone:         {"GCDone", false, []string{}},
	traceEvGCScanStart:    {"GCScanStart", false, []string{}},
	traceEvGCScanDone:     {"GCScanDone", false, []string{}},
	traceEvGCSweepStart:   {"GCSweepStart", true, []string{}},
	traceEvGCSweepDone:    {"GCSweepDone", false, []string{}},
	traceEvGoCreate:       {"GoCreate", true, []string{"g", "pc"}},
	traceEvGoStart:        {"GoStart", false, []string{"g"}},
	traceEvGoEnd:          {"GoEnd", false, []string{}},
	traceEvGoStop:         {"GoStop", true, []string{}},
	traceEvGoSched:        {"GoSched", true, []string{}},
	traceEvGoPreempt:      {"GoPreempt", true, []string{}},
	traceEvGoSleep:        {"GoSleep", true, []string{}},
	traceEvGoBlock:        {"GoBlock", true, []string{}},
	traceEvGoUnblock:      {"GoUnblock", true, []string{"g"}},
	traceEvGoBlockSend:    {"GoBlockSend", true, []string{}},
	traceEvGoBlockRecv:    {"GoBlockRecv", true, []string{}},
	traceEvGoBlockSelect:  {"GoBlockSelect", true, []string{}},
	traceEvGoBlockSync:    {"GoBlockSync", true, []string{}},
	traceEvGoBlockCond:    {"GoBlockCond", true, []string{}},
	traceEvGoBlockNet:     {"GoBlockNet", true, []string{}},
	traceEvGoSysCall:      {"GoSysCall", true, []string{}},
	traceEvGoSysExit:      {"GoSysExit", false, []string{"g"}},
	traceEvGoSysBlock:     {"GoSysBlock", true, []string{}},
	traceEvGoWaiting:      {"GoWaiting", false, []string{"g"}},
	traceEvGoInSyscall:    {"GoInSyscall", false, []string{"g"}},
	traceEvHeapAlloc:      {"HeapAlloc", false, []string{"mem"}},
	traceEvNextGC:         {"NextGC", false, []string{"mem"}},
	traceEvTimerGoroutine: {"TimerGoroutine", false, []string{"g"}},
}

View File

@ -6,6 +6,7 @@ package pprof_test
import (
"bytes"
"internal/trace"
"net"
"os"
"runtime"
@ -66,7 +67,7 @@ func TestTrace(t *testing.T) {
t.Fatalf("failed to start tracing: %v", err)
}
StopTrace()
_, err := parseTrace(buf)
_, err := trace.Parse(buf)
if err != nil {
t.Fatalf("failed to parse trace: %v", err)
}
@ -198,12 +199,153 @@ func TestTraceStress(t *testing.T) {
runtime.GOMAXPROCS(procs)
StopTrace()
_, err = parseTrace(buf)
_, err = trace.Parse(buf)
if err != nil {
t.Fatalf("failed to parse trace: %v", err)
}
}
// Do a bunch of various stuff (timers, GC, network, etc) in a separate goroutine.
// And concurrently with all that start/stop trace 3 times.
func TestTraceStressStartStop(t *testing.T) {
skipTraceTestsIfNeeded(t)
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
outerDone := make(chan bool)
go func() {
defer func() {
outerDone <- true
}()
var wg sync.WaitGroup
done := make(chan bool)
wg.Add(1)
go func() {
<-done
wg.Done()
}()
rp, wp, err := os.Pipe()
if err != nil {
t.Fatalf("failed to create pipe: %v", err)
}
defer func() {
rp.Close()
wp.Close()
}()
wg.Add(1)
go func() {
var tmp [1]byte
rp.Read(tmp[:])
<-done
wg.Done()
}()
time.Sleep(time.Millisecond)
go func() {
runtime.LockOSThread()
for {
select {
case <-done:
return
default:
runtime.Gosched()
}
}
}()
runtime.GC()
// Trigger GC from malloc.
for i := 0; i < 1e3; i++ {
_ = make([]byte, 1<<20)
}
// Create a bunch of busy goroutines to load all Ps.
for p := 0; p < 10; p++ {
wg.Add(1)
go func() {
// Do something useful.
tmp := make([]byte, 1<<16)
for i := range tmp {
tmp[i]++
}
_ = tmp
<-done
wg.Done()
}()
}
// Block in syscall.
wg.Add(1)
go func() {
var tmp [1]byte
rp.Read(tmp[:])
<-done
wg.Done()
}()
runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
// Test timers.
timerDone := make(chan bool)
go func() {
time.Sleep(time.Millisecond)
timerDone <- true
}()
<-timerDone
// A bit of network.
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("listen failed: %v", err)
}
defer ln.Close()
go func() {
c, err := ln.Accept()
if err != nil {
return
}
time.Sleep(time.Millisecond)
var buf [1]byte
c.Write(buf[:])
c.Close()
}()
c, err := net.Dial("tcp", ln.Addr().String())
if err != nil {
t.Fatalf("dial failed: %v", err)
}
var tmp [1]byte
c.Read(tmp[:])
c.Close()
go func() {
runtime.Gosched()
select {}
}()
// Unblock helper goroutines and wait them to finish.
wp.Write(tmp[:])
wp.Write(tmp[:])
close(done)
wg.Wait()
}()
for i := 0; i < 3; i++ {
buf := new(bytes.Buffer)
if err := StartTrace(buf); err != nil {
t.Fatalf("failed to start tracing: %v", err)
}
time.Sleep(time.Millisecond)
StopTrace()
if _, err := trace.Parse(buf); err != nil {
t.Fatalf("failed to parse trace: %v", err)
}
}
<-outerDone
}
func TestTraceSymbolize(t *testing.T) {
skipTraceTestsIfNeeded(t)
if runtime.GOOS == "nacl" {
@ -215,24 +357,24 @@ func TestTraceSymbolize(t *testing.T) {
}
runtime.GC()
StopTrace()
events, err := parseTrace(buf)
events, err := trace.Parse(buf)
if err != nil {
t.Fatalf("failed to parse trace: %v", err)
}
err = symbolizeTrace(events, os.Args[0])
err = trace.Symbolize(events, os.Args[0])
if err != nil {
t.Fatalf("failed to symbolize trace: %v", err)
}
found := false
eventLoop:
for _, ev := range events {
if ev.typ != traceEvGCStart {
if ev.Type != trace.EvGCStart {
continue
}
for _, f := range ev.stk {
if strings.HasSuffix(f.file, "trace_test.go") &&
strings.HasSuffix(f.fn, "pprof_test.TestTraceSymbolize") &&
f.line == 216 {
for _, f := range ev.Stk {
if strings.HasSuffix(f.File, "trace_test.go") &&
strings.HasSuffix(f.Fn, "pprof_test.TestTraceSymbolize") &&
f.Line == 358 {
found = true
break eventLoop
}

View File

@ -123,7 +123,7 @@ func forcegchelper() {
if debug.gctrace > 0 {
println("GC forced")
}
gogc(1)
startGC(gcForceMode)
}
}

View File

@ -23,6 +23,9 @@ func RaceSemrelease(s *uint32)
// private interface for the runtime
const raceenabled = true
// For all functions accepting callerpc and pc,
// callerpc is a return PC of the function that calls this function,
// pc is start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
kind := t.kind & kindMask
if kind == kindArray || kind == kindStruct {

View File

@ -9,4 +9,4 @@ $ ./buildgo.sh
Tested with gcc 4.6.1 and 4.7.0. On Windows it's built with 64-bit MinGW.
Current runtime is built on rev 215000.
Current runtime is built on rev 229396.

View File

@ -226,26 +226,6 @@ func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
}
}
//go:nosplit
func racewriteobjectpc(addr unsafe.Pointer, t *_type, callpc, pc uintptr) {
kind := t.kind & _KindMask
if kind == _KindArray || kind == _KindStruct {
racewriterangepc(addr, t.size, callpc, pc)
} else {
racewritepc(addr, callpc, pc)
}
}
//go:nosplit
func racereadobjectpc(addr unsafe.Pointer, t *_type, callpc, pc uintptr) {
kind := t.kind & _KindMask
if kind == _KindArray || kind == _KindStruct {
racereadrangepc(addr, t.size, callpc, pc)
} else {
racereadpc(addr, callpc, pc)
}
}
//go:nosplit
func raceacquire(addr unsafe.Pointer) {
raceacquireg(getg(), addr)

View File

@ -58,6 +58,7 @@ TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
MOVQ addr+0(FP), RARG1
MOVQ callpc+8(FP), RARG2
MOVQ pc+16(FP), RARG3
ADDQ $1, RARG3 // pc is function start, tsan wants return address
// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
MOVQ $__tsan_read_pc(SB), AX
JMP racecalladdr<>(SB)
@ -81,6 +82,7 @@ TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
MOVQ addr+0(FP), RARG1
MOVQ callpc+8(FP), RARG2
MOVQ pc+16(FP), RARG3
ADDQ $1, RARG3 // pc is function start, tsan wants return address
// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
MOVQ $__tsan_write_pc(SB), AX
JMP racecalladdr<>(SB)
@ -105,6 +107,7 @@ TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
MOVQ addr+0(FP), RARG1
MOVQ size+8(FP), RARG2
MOVQ pc+16(FP), RARG3
ADDQ $1, RARG3 // pc is function start, tsan wants return address
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVQ $__tsan_read_range(SB), AX
JMP racecalladdr<>(SB)
@ -129,6 +132,7 @@ TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
MOVQ addr+0(FP), RARG1
MOVQ size+8(FP), RARG2
MOVQ pc+16(FP), RARG3
ADDQ $1, RARG3 // pc is function start, tsan wants return address
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVQ $__tsan_write_range(SB), AX
JMP racecalladdr<>(SB)

View File

@ -227,8 +227,6 @@ def lookup_type(name):
except gdb.error:
pass
_rctp_type = gdb.lookup_type("struct reflect.rtype").pointer()
def iface_commontype(obj):
if is_iface(obj):
@ -238,7 +236,7 @@ def iface_commontype(obj):
else:
return
return go_type_ptr.cast(_rctp_type).dereference()
return go_type_ptr.cast(gdb.lookup_type("struct reflect.rtype").pointer()).dereference()
def iface_dtype(obj):

View File

@ -58,13 +58,6 @@ var (
iswindows int32
)
// Information about what cpu features are available.
// Set on startup in asm_{x86/amd64}.s.
var (
//cpuid_ecx uint32
//cpuid_edx uint32
)
func goargs() {
if GOOS == "windows" {
return

View File

@ -562,12 +562,16 @@ var (
goos *int8
ncpu int32
iscgo bool
cpuid_ecx uint32
cpuid_edx uint32
signote note
forcegc forcegcstate
sched schedt
newprocs int32
// Information about what cpu features are available.
// Set on startup in asm_{x86,amd64}.s.
cpuid_ecx uint32
cpuid_edx uint32
lfenceBeforeRdtsc bool
)
/*

View File

@ -1,14 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#define SIG_DFL ((void*)0)
#define SIG_IGN ((void*)1)
typedef void GoSighandler(int32, Siginfo*, void*, G*);
void runtime·setsig(int32, GoSighandler*, bool);
GoSighandler* runtime·getsig(int32);
void runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp);
void runtime·raise(int32);

View File

@ -190,7 +190,6 @@ func StopTrace() {
}
traceGoSched()
traceGoStart()
for _, p := range &allp {
if p == nil {