
runtime/trace: fix corrupted trace during StartTrace

Since Go 1.8, the different types of GC mark workers have been annotated,
and the annotation strings are recorded during StartTrace. This change
fixes two issues around the use of traceString from StartTrace.

1) "failed to parse trace: no consistent ordering of events possible"

This issue is the result of a missing 'batch' event entry. For efficient
tracing, the tracer maintains system-allocated buffers, and once a buffer
is full it is flushed out for writing. Tracing assumes that all records in
the same buffer (batch) are already ordered, applies further optimizations
in the encoding, and defers full order reconstruction until trace parsing
time. Thus, when a flush happens and a new buffer is taken, the new buffer
must begin with an event marking the start of a new batch. Before this CL,
the batch entry was written only by traceEvent, and only when the buffer
position was 0; it was not written when a flush occurred during
traceString.

This CL fixes it by moving the batch entry write into traceFlush.
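
(For background: each buffer-sized batch in the trace stream is
self-describing. It opens with an EvBatch record carrying the producing
P's id and a base timestamp, and later events in the batch encode only
tick deltas, so the parser can merge the per-P batches back into one
ordered stream. Below is a minimal sketch of that framing in Go; the type
and field names are hypothetical, not the runtime's actual encoding.)

package main

import "fmt"

// Sketch only: hypothetical framing showing why a freshly flushed-in
// buffer must start with a batch header. Within a batch, events are
// trusted to be in order; across batches, the parser reconstructs the
// global order from each header's pid and base ticks.
type event struct {
	typ      byte
	tickDiff uint64 // delta from the batch's base timestamp
}

type batch struct {
	pid   int32  // from the EvBatch header
	ticks uint64 // base timestamp from the EvBatch header
	evs   []event
}

func dump(batches []batch) {
	for _, b := range batches {
		for _, e := range b.evs {
			// Without the header, pid and the absolute timestamp
			// below would be unrecoverable, and parsing would fail
			// with "no consistent ordering of events possible".
			fmt.Printf("P%d t=%d ev=%#x\n", b.pid, b.ticks+e.tickDiff, e.typ)
		}
	}
}

func main() {
	dump([]batch{{pid: 1, ticks: 100, evs: []event{{1, 0}, {2, 7}}}})
}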

2) crash during tracing due to invalid memory access, or during parsing
due to duplicate string entries

This issue is the result of memory allocation during traceString calls.
The execution tracer traces some memory allocation activity. Before this
CL, traceString took the buffer address (*traceBuf) and mutated the
buffer. If memory tracing occurred in the meantime on the same P, the
allocation tracing (traceEvent) would take the same buffer through the
pointer to the buffer address (**traceBuf) and mutate the buffer as well.

As a result, one of the following can happen:
 - the allocation record is overwritten by the following string
   record (data loss);
 - if a buffer flush occurs during the allocation tracing, traceString
   will attempt to write the string record to the old buffer,
   eventually causing an invalid memory access crash;
 - a flush of the same buffer can occur twice (once from the memory
   allocation, and once from the string record write), in which case
   the trace contains the same data twice and the parser complains
   about duplicate string record entries.

This CL fixes the second issue by making traceString take
**traceBuf (*traceBufPtr).
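
To see why the extra indirection matters, here is a simplified,
self-contained sketch (hypothetical names; the real types live in the
runtime, as shown in the diff below). With a plain *traceBuf parameter, a
reentrant flush strands the callee on a stale pointer; passing the address
of the buffer pointer lets the callee re-read the current buffer after any
potentially allocating step.

package main

// Sketch only: simplified shapes of the bug and the fix; traceBuf and
// mayTrace here are stand-ins, not the runtime's definitions.
type traceBuf struct {
	pos int
	arr [16]byte
}

// mayTrace stands in for any step that can allocate and hence reenter
// the tracer; the tracer may flush the current buffer and install a
// fresh one by updating the P's buffer pointer *pp.
func mayTrace(pp **traceBuf) {
	*pp = &traceBuf{} // the old buffer was flushed and replaced
}

// Old shape: the callee holds a copy of the buffer pointer. After a
// reentrant flush it keeps writing through the stale copy, into a buffer
// that was already queued for writing (data loss, a double flush, or an
// invalid memory access).
func writeOld(buf *traceBuf, pp **traceBuf) *traceBuf {
	mayTrace(pp)           // swaps *pp, but buf still points at the old buffer
	buf.arr[buf.pos] = 'S' // stale write
	buf.pos++
	return buf
}

// New shape: the callee takes the address of the buffer pointer
// (**traceBuf, i.e. *traceBufPtr in the runtime) and re-reads it after
// the last possibly-allocating step.
func writeNew(pp **traceBuf) {
	mayTrace(pp)
	buf := *pp // re-load the current buffer
	buf.arr[buf.pos] = 'S'
	buf.pos++
}

func main() {
	p := &traceBuf{}
	writeNew(&p)
	_ = writeOld(p, &p)
}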

Change-Id: I24f629758625b38e1916fbfc7d7be6ea210586af
Reviewed-on: https://go-review.googlesource.com/50873
Run-TryBot: Austin Clements <austin@google.com>
Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Author: Hana (Hyang-Ah) Kim, 2017-07-24 11:51:03 -04:00
Committer: Hyang-Ah Hana Kim
parent af192a3e22
commit d58f4e9b7b
2 changed files with 103 additions and 31 deletions

src/runtime/trace.go

@@ -235,21 +235,21 @@ func StartTrace() error {
 	trace.timeStart = nanotime()
 	trace.headerWritten = false
 	trace.footerWritten = false
-	trace.strings = make(map[string]uint64)
+	// string to id mapping
+	//  0 : reserved for an empty string
+	//  remaining: other strings registered by traceString
 	trace.stringSeq = 0
+	trace.strings = make(map[string]uint64)
 	trace.seqGC = 0
 	_g_.m.startingtrace = false
 	trace.enabled = true
 	// Register runtime goroutine labels.
 	_, pid, bufp := traceAcquireBuffer()
-	buf := (*bufp).ptr()
-	if buf == nil {
-		buf = traceFlush(0).ptr()
-		(*bufp).set(buf)
-	}
 	for i, label := range gcMarkWorkerModeStrings[:] {
-		trace.markWorkerLabels[i], buf = traceString(buf, label)
+		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
 	}
 	traceReleaseBuffer(pid)
@@ -513,18 +513,12 @@ func traceEvent(ev byte, skip int, args ...uint64) {
 	buf := (*bufp).ptr()
 	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two add params
 	if buf == nil || len(buf.arr)-buf.pos < maxSize {
-		buf = traceFlush(traceBufPtrOf(buf)).ptr()
+		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
 		(*bufp).set(buf)
 	}
 
 	ticks := uint64(cputicks()) / traceTickDiv
 	tickDiff := ticks - buf.lastTicks
-	if buf.pos == 0 {
-		buf.byte(traceEvBatch | 1<<traceArgCountShift)
-		buf.varint(uint64(pid))
-		buf.varint(ticks)
-		tickDiff = 0
-	}
 	buf.lastTicks = ticks
 	narg := byte(len(args))
 	if skip >= 0 {
@@ -602,7 +596,7 @@ func traceReleaseBuffer(pid int32) {
 }
 
 // traceFlush puts buf onto stack of full buffers and returns an empty buffer.
-func traceFlush(buf traceBufPtr) traceBufPtr {
+func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
 	owner := trace.lockOwner
 	dolock := owner == nil || owner != getg().m.curg
 	if dolock {
@@ -623,34 +617,51 @@ func traceFlush(buf traceBufPtr) traceBufPtr {
 	bufp := buf.ptr()
 	bufp.link.set(nil)
 	bufp.pos = 0
-	bufp.lastTicks = 0
+
+	// initialize the buffer for a new batch
+	ticks := uint64(cputicks()) / traceTickDiv
+	bufp.lastTicks = ticks
+	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
+	bufp.varint(uint64(pid))
+	bufp.varint(ticks)
+
 	if dolock {
 		unlock(&trace.lock)
 	}
 	return buf
 }
 
-func traceString(buf *traceBuf, s string) (uint64, *traceBuf) {
+// traceString adds a string to the trace.strings and returns the id.
+func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
 	if s == "" {
-		return 0, buf
+		return 0, bufp
 	}
 	if id, ok := trace.strings[s]; ok {
-		return id, buf
+		return id, bufp
 	}
 
 	trace.stringSeq++
 	id := trace.stringSeq
 	trace.strings[s] = id
+
+	// The memory allocation above may trigger tracing and cause *bufp
+	// to change. The following code works with *bufp, so there must be
+	// no memory allocation or any other activity that causes tracing
+	// after this point.
+
+	buf := (*bufp).ptr()
 	size := 1 + 2*traceBytesPerNumber + len(s)
-	if len(buf.arr)-buf.pos < size {
-		buf = traceFlush(traceBufPtrOf(buf)).ptr()
+	if buf == nil || len(buf.arr)-buf.pos < size {
+		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
+		(*bufp).set(buf)
 	}
 	buf.byte(traceEvString)
 	buf.varint(id)
 	buf.varint(uint64(len(s)))
 	buf.pos += copy(buf.arr[buf.pos:], s)
-	return id, buf
+
+	(*bufp).set(buf)
+	return id, bufp
 }
// traceAppend appends v to buf in little-endian-base-128 encoding.
@@ -780,7 +791,7 @@ func allFrames(pcs []uintptr) []Frame {
 // releases all memory and resets state.
 func (tab *traceStackTable) dump() {
 	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
-	buf := traceFlush(0).ptr()
+	bufp := traceFlush(0, 0)
 	for _, stk := range tab.tab {
 		stk := stk.ptr()
 		for ; stk != nil; stk = stk.link.ptr() {
@@ -790,7 +801,7 @@ func (tab *traceStackTable) dump() {
 			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
 			for _, f := range frames {
 				var frame traceFrame
-				frame, buf = traceFrameForPC(buf, f)
+				frame, bufp = traceFrameForPC(bufp, 0, f)
 				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
 				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
 				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
@@ -798,9 +809,10 @@
 		}
 		// Now copy to the buffer.
 		size := 1 + traceBytesPerNumber + len(tmpbuf)
-		if len(buf.arr)-buf.pos < size {
-			buf = traceFlush(traceBufPtrOf(buf)).ptr()
+		if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
+			bufp = traceFlush(bufp, 0)
 		}
+		buf := bufp.ptr()
 		buf.byte(traceEvStack | 3<<traceArgCountShift)
 		buf.varint(uint64(len(tmpbuf)))
 		buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
@@ -808,7 +820,7 @@
 	}
 	lock(&trace.lock)
-	traceFullQueue(traceBufPtrOf(buf))
+	traceFullQueue(bufp)
 	unlock(&trace.lock)
 	tab.mem.drop()
@@ -821,7 +833,10 @@ type traceFrame struct {
 	line uint64
 }
 
-func traceFrameForPC(buf *traceBuf, f Frame) (traceFrame, *traceBuf) {
+// traceFrameForPC records the frame information.
+// It may allocate memory.
+func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
+	bufp := &buf
 	var frame traceFrame
 	fn := f.Function
@@ -829,14 +844,14 @@ func traceFrameForPC(buf *traceBuf, f Frame) (traceFrame, *traceBuf) {
 	if len(fn) > maxLen {
 		fn = fn[len(fn)-maxLen:]
 	}
-	frame.funcID, buf = traceString(buf, fn)
+	frame.funcID, bufp = traceString(bufp, pid, fn)
 	frame.line = uint64(f.Line)
 	file := f.File
 	if len(file) > maxLen {
 		file = file[len(file)-maxLen:]
 	}
-	frame.fileID, buf = traceString(buf, file)
-	return frame, buf
+	frame.fileID, bufp = traceString(bufp, pid, file)
+	return frame, (*bufp)
 }
 
 // traceAlloc is a non-thread-safe region allocator.

src/runtime/trace/trace_test.go

@@ -7,6 +7,7 @@ package trace_test
 import (
 	"bytes"
 	"flag"
+	"internal/race"
 	"internal/trace"
 	"io"
 	"io/ioutil"
@@ -14,6 +15,7 @@ import (
 	"os"
 	"runtime"
 	. "runtime/trace"
+	"strconv"
 	"sync"
 	"testing"
 	"time"
@@ -23,6 +25,61 @@ var (
 	saveTraces = flag.Bool("savetraces", false, "save traces collected by tests")
 )
 
+// TestEventBatch tests that Flush calls that happen during Start
+// don't produce corrupted traces.
+func TestEventBatch(t *testing.T) {
+	if race.Enabled {
+		t.Skip("skipping in race mode")
+	}
+	if testing.Short() {
+		t.Skip("skipping in short mode")
+	}
+	// During Start, a bunch of records are written to reflect the
+	// current snapshot of the program, including the state of each
+	// goroutine. Some string constants are also written to the trace
+	// to aid trace parsing. This test checks that a buffer Flush
+	// occurring during this process doesn't produce a corrupted trace.
+	// Whether a Flush happens during Start is hard to control, so we
+	// test with a range of goroutine counts, hoping that one of them
+	// triggers a Flush.
+	// This range was chosen to fill up a ~64KB buffer with
+	// traceEvGoCreate and traceEvGoWaiting events (12-13 bytes per
+	// goroutine).
+	for g := 4950; g < 5050; g++ {
+		n := g
+		t.Run("G="+strconv.Itoa(n), func(t *testing.T) {
+			var wg sync.WaitGroup
+			wg.Add(n)
+
+			in := make(chan bool, 1000)
+			for i := 0; i < n; i++ {
+				go func() {
+					<-in
+					wg.Done()
+				}()
+			}
+
+			buf := new(bytes.Buffer)
+			if err := Start(buf); err != nil {
+				t.Fatalf("failed to start tracing: %v", err)
+			}
+
+			for i := 0; i < n; i++ {
+				in <- true
+			}
+			wg.Wait()
+			Stop()
+
+			_, err := trace.Parse(buf, "")
+			if err == trace.ErrTimeOrder {
+				t.Skipf("skipping trace: %v", err)
+			}
+			if err != nil {
+				t.Fatalf("failed to parse trace: %v", err)
+			}
+		})
+	}
+}
+
 func TestTraceStartStop(t *testing.T) {
 	buf := new(bytes.Buffer)
 	if err := Start(buf); err != nil {