diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go
new file mode 100644
index 00000000000..3f8481270a4
--- /dev/null
+++ b/src/runtime/debuglog.go
@@ -0,0 +1,821 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides an internal debug logging facility. The debug
+// log is a lightweight, in-memory, per-M ring buffer. By default, the
+// runtime prints the debug log on panic.
+//
+// To print something to the debug log, call dlog to obtain a dlogger
+// and use the methods on that to add values. The values will be
+// space-separated in the output (much like println).
+//
+// This facility can be enabled by passing -tags debuglog when
+// building. Without this tag, dlog calls compile to nothing.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"unsafe"
+)
+
+// debugLogBytes is the size of each per-M ring buffer. This is
+// allocated off-heap to avoid blowing up the M and hence the GC'd
+// heap size.
+const debugLogBytes = 16 << 10
+
+// debugLogStringLimit is the maximum number of bytes in a string.
+// Above this, the string will be truncated with "..(n more bytes).."
+const debugLogStringLimit = debugLogBytes / 8
+
+// dlog returns a debug logger. The caller can use methods on the
+// returned logger to add values, which will be space-separated in the
+// final output, much like println. The caller must call end() to
+// finish the message.
+//
+// dlog can be used from highly-constrained corners of the runtime: it
+// is safe to use in the signal handler, from within the write
+// barrier, from within the stack implementation, and in places that
+// must be recursively nosplit.
+//
+// This will be compiled away if built without the debuglog build tag.
+// However, argument construction may not be. If any of the arguments
+// are not literals or trivial expressions, consider protecting the
+// call with "if dlogEnabled".
+//
+//go:nosplit
+//go:nowritebarrierrec
+func dlog() *dlogger {
+	if !dlogEnabled {
+		return nil
+	}
+
+	// Get the time.
+	tick, nano := uint64(cputicks()), uint64(nanotime())
+
+	// Try to get a cached logger.
+	l := getCachedDlogger()
+
+	// If we couldn't get a cached logger, try to get one from the
+	// global pool.
+	if l == nil {
+		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
+		all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
+		for l1 := all; l1 != nil; l1 = l1.allLink {
+			if atomic.Load(&l1.owned) == 0 && atomic.Cas(&l1.owned, 0, 1) {
+				l = l1
+				break
+			}
+		}
+	}
+
+	// If that failed, allocate a new logger.
+	if l == nil {
+		l = (*dlogger)(sysAlloc(unsafe.Sizeof(dlogger{}), nil))
+		if l == nil {
+			throw("failed to allocate debug log")
+		}
+		l.w.r.data = &l.w.data
+		l.owned = 1
+
+		// Prepend to allDloggers list.
+		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
+		for {
+			head := atomic.Loaduintptr(headp)
+			l.allLink = (*dlogger)(unsafe.Pointer(head))
+			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
+				break
+			}
+		}
+	}
+
+	// If the time delta is getting too high, write a new sync
+	// packet. We set the limit so we don't write more than 6
+	// bytes of delta in the record header.
+	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
+	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
+		l.w.writeSync(tick, nano)
+	}
+
+	// Reserve space for framing header.
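+	// (The 2-byte length in the framing header can't be filled in
+	// yet; end() backfills it via writeFrameAt once the record is
+	// complete and its size is known.)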
+	l.w.ensure(debugLogHeaderSize)
+	l.w.write += debugLogHeaderSize
+
+	// Write record header.
+	l.w.uvarint(tick - l.w.tick)
+	l.w.uvarint(nano - l.w.nano)
+	gp := getg()
+	if gp != nil && gp.m != nil && gp.m.p != 0 {
+		l.w.varint(int64(gp.m.p.ptr().id))
+	} else {
+		l.w.varint(-1)
+	}
+
+	return l
+}
+
+// A dlogger writes to the debug log.
+//
+// To obtain a dlogger, call dlog(). When done with the dlogger, call
+// end().
+//
+//go:notinheap
+type dlogger struct {
+	w debugLogWriter
+
+	// allLink is the next dlogger in the allDloggers list.
+	allLink *dlogger
+
+	// owned indicates that this dlogger is owned by an M. This is
+	// accessed atomically.
+	owned uint32
+}
+
+// allDloggers is a list of all dloggers, linked through
+// dlogger.allLink. This is accessed atomically. This is prepend only,
+// so it doesn't need to protect against ABA races.
+var allDloggers *dlogger
+
+//go:nosplit
+func (l *dlogger) end() {
+	if !dlogEnabled {
+		return
+	}
+
+	// Fill in framing header.
+	size := l.w.write - l.w.r.end
+	if !l.w.writeFrameAt(l.w.r.end, size) {
+		throw("record too large")
+	}
+
+	// Commit the record.
+	l.w.r.end = l.w.write
+
+	// Attempt to return this logger to the cache.
+	if putCachedDlogger(l) {
+		return
+	}
+
+	// Return the logger to the global pool.
+	atomic.Store(&l.owned, 0)
+}
+
+const (
+	debugLogUnknown = 1 + iota
+	debugLogBoolTrue
+	debugLogBoolFalse
+	debugLogInt
+	debugLogUint
+	debugLogHex
+	debugLogPtr
+	debugLogString
+	debugLogConstString
+	debugLogStringOverflow
+
+	debugLogPC
+	debugLogTraceback
+)
+
+//go:nosplit
+func (l *dlogger) b(x bool) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	if x {
+		l.w.byte(debugLogBoolTrue)
+	} else {
+		l.w.byte(debugLogBoolFalse)
+	}
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) i(x int) *dlogger {
+	return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i8(x int8) *dlogger {
+	return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i16(x int16) *dlogger {
+	return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i32(x int32) *dlogger {
+	return l.i64(int64(x))
+}
+
+//go:nosplit
+func (l *dlogger) i64(x int64) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	l.w.byte(debugLogInt)
+	l.w.varint(x)
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) u(x uint) *dlogger {
+	return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) uptr(x uintptr) *dlogger {
+	return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u8(x uint8) *dlogger {
+	return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u16(x uint16) *dlogger {
+	return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u32(x uint32) *dlogger {
+	return l.u64(uint64(x))
+}
+
+//go:nosplit
+func (l *dlogger) u64(x uint64) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	l.w.byte(debugLogUint)
+	l.w.uvarint(x)
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) hex(x uint64) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	l.w.byte(debugLogHex)
+	l.w.uvarint(x)
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) p(x interface{}) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	l.w.byte(debugLogPtr)
+	if x == nil {
+		l.w.uvarint(0)
+	} else {
+		v := efaceOf(&x)
+		switch v._type.kind & kindMask {
+		case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
+			l.w.uvarint(uint64(uintptr(v.data)))
+		default:
+			throw("not a pointer type")
+		}
+	}
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) s(x string) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	str := stringStructOf(&x)
+	datap := &firstmoduledata
+	if len(x) > 4 && datap.etext <= uintptr(str.str) && uintptr(str.str) < datap.end {
+		// String constants are in the rodata section, which
+		// isn't recorded in moduledata. But it has to be
+		// somewhere between etext and end.
+		l.w.byte(debugLogConstString)
+		l.w.uvarint(uint64(str.len))
+		l.w.uvarint(uint64(uintptr(str.str) - datap.etext))
+	} else {
+		l.w.byte(debugLogString)
+		var b []byte
+		bb := (*slice)(unsafe.Pointer(&b))
+		bb.array = str.str
+		bb.len, bb.cap = str.len, str.len
+		if len(b) > debugLogStringLimit {
+			b = b[:debugLogStringLimit]
+		}
+		l.w.uvarint(uint64(len(b)))
+		l.w.bytes(b)
+		if len(b) != len(x) {
+			l.w.byte(debugLogStringOverflow)
+			l.w.uvarint(uint64(len(x) - len(b)))
+		}
+	}
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) pc(x uintptr) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	l.w.byte(debugLogPC)
+	l.w.uvarint(uint64(x))
+	return l
+}
+
+//go:nosplit
+func (l *dlogger) traceback(x []uintptr) *dlogger {
+	if !dlogEnabled {
+		return l
+	}
+	l.w.byte(debugLogTraceback)
+	l.w.uvarint(uint64(len(x)))
+	for _, pc := range x {
+		l.w.uvarint(uint64(pc))
+	}
+	return l
+}
+
+// A debugLogWriter is a ring buffer of binary debug log records.
+//
+// A log record consists of a 2-byte framing header and a sequence of
+// fields. The framing header gives the size of the record as a little
+// endian 16-bit value. Each field starts with a byte indicating its
+// type, followed by type-specific data. If the size in the framing
+// header is 0, it's a sync record consisting of two little endian
+// 64-bit values giving a new time base.
+//
+// Because this is a ring buffer, new records will eventually
+// overwrite old records. Hence, it maintains a reader that consumes
+// the log as it gets overwritten. That reader state is where an
+// actual log reader would start.
+//
+//go:notinheap
+type debugLogWriter struct {
+	write uint64
+	data  debugLogBuf
+
+	// tick and nano are the time bases from the most recently
+	// written sync record.
+	tick, nano uint64
+
+	// r is a reader that consumes records as they get overwritten
+	// by the writer. It also acts as the initial reader state
+	// when printing the log.
+	r debugLogReader
+
+	// buf is a scratch buffer for encoding. This is here to
+	// reduce stack usage.
+	buf [10]byte
+}
+
+//go:notinheap
+type debugLogBuf [debugLogBytes]byte
+
+const (
+	// debugLogHeaderSize is the number of bytes in the framing
+	// header of every dlog record.
+	debugLogHeaderSize = 2
+
+	// debugLogSyncSize is the number of bytes in a sync record.
+	debugLogSyncSize = debugLogHeaderSize + 2*8
+)
+
+//go:nosplit
+func (l *debugLogWriter) ensure(n uint64) {
+	for l.write+n >= l.r.begin+uint64(len(l.data)) {
+		// Consume record at begin.
+		if l.r.skip() == ^uint64(0) {
+			// Wrapped around within a record.
+			//
+			// TODO(austin): It would be better to just
+			// eat the whole buffer at this point, but we
+			// have to communicate that to the reader
+			// somehow.
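+			//
+			// In practice this should only be reachable
+			// if a single record approaches the size of
+			// the entire ring buffer; debugLogStringLimit
+			// keeps string fields well under that.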
+			throw("record wrapped around")
+		}
+	}
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
+	l.data[pos%uint64(len(l.data))] = uint8(size)
+	l.data[(pos+1)%uint64(len(l.data))] = uint8(size >> 8)
+	return size <= 0xFFFF
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeSync(tick, nano uint64) {
+	l.tick, l.nano = tick, nano
+	l.ensure(debugLogHeaderSize)
+	l.writeFrameAt(l.write, 0)
+	l.write += debugLogHeaderSize
+	l.writeUint64LE(tick)
+	l.writeUint64LE(nano)
+	l.r.end = l.write
+}
+
+//go:nosplit
+func (l *debugLogWriter) writeUint64LE(x uint64) {
+	var b [8]byte
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+	b[4] = byte(x >> 32)
+	b[5] = byte(x >> 40)
+	b[6] = byte(x >> 48)
+	b[7] = byte(x >> 56)
+	l.bytes(b[:])
+}
+
+//go:nosplit
+func (l *debugLogWriter) byte(x byte) {
+	l.ensure(1)
+	pos := l.write
+	l.write++
+	l.data[pos%uint64(len(l.data))] = x
+}
+
+//go:nosplit
+func (l *debugLogWriter) bytes(x []byte) {
+	l.ensure(uint64(len(x)))
+	pos := l.write
+	l.write += uint64(len(x))
+	for len(x) > 0 {
+		n := copy(l.data[pos%uint64(len(l.data)):], x)
+		pos += uint64(n)
+		x = x[n:]
+	}
+}
+
+//go:nosplit
+func (l *debugLogWriter) varint(x int64) {
+	var u uint64
+	if x < 0 {
+		u = (^uint64(x) << 1) | 1 // complement x, bit 0 is 1
+	} else {
+		u = (uint64(x) << 1) // do not complement x, bit 0 is 0
+	}
+	l.uvarint(u)
+}
+
+//go:nosplit
+func (l *debugLogWriter) uvarint(u uint64) {
+	i := 0
+	for u >= 0x80 {
+		l.buf[i] = byte(u) | 0x80
+		u >>= 7
+		i++
+	}
+	l.buf[i] = byte(u)
+	i++
+	l.bytes(l.buf[:i])
+}
+
+type debugLogReader struct {
+	data *debugLogBuf
+
+	// begin and end are the positions in the log of the beginning
+	// and end of the log data, modulo len(data).
+	begin, end uint64
+
+	// tick and nano are the current time base at begin.
+	tick, nano uint64
+}
+
+//go:nosplit
+func (r *debugLogReader) skip() uint64 {
+	// Read size at pos.
+	if r.begin+debugLogHeaderSize > r.end {
+		return ^uint64(0)
+	}
+	size := uint64(r.readUint16LEAt(r.begin))
+	if size == 0 {
+		// Sync packet.
+		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
+		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
+		size = debugLogSyncSize
+	}
+	if r.begin+size > r.end {
+		return ^uint64(0)
+	}
+	r.begin += size
+	return size
+}
+
+//go:nosplit
+func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
+	return uint16(r.data[pos%uint64(len(r.data))]) |
+		uint16(r.data[(pos+1)%uint64(len(r.data))])<<8
+}
+
+//go:nosplit
+func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
+	var b [8]byte
+	for i := range b {
+		b[i] = r.data[pos%uint64(len(r.data))]
+		pos++
+	}
+	return uint64(b[0]) | uint64(b[1])<<8 |
+		uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 |
+		uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (r *debugLogReader) peek() (tick uint64) {
+	// Consume any sync records.
+	size := uint64(0)
+	for size == 0 {
+		if r.begin+debugLogHeaderSize > r.end {
+			return ^uint64(0)
+		}
+		size = uint64(r.readUint16LEAt(r.begin))
+		if size != 0 {
+			break
+		}
+		if r.begin+debugLogSyncSize > r.end {
+			return ^uint64(0)
+		}
+		// Sync packet.
+		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
+		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
+		r.begin += debugLogSyncSize
+	}
+
+	// Peek tick delta.
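+	// The uvarint is decoded by hand rather than via r.uvarint so
+	// that r.begin is left untouched: peek must not consume the
+	// record.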
+	if r.begin+size > r.end {
+		return ^uint64(0)
+	}
+	pos := r.begin + debugLogHeaderSize
+	var u uint64
+	for i := uint(0); ; i += 7 {
+		b := r.data[pos%uint64(len(r.data))]
+		pos++
+		u |= uint64(b&^0x80) << i
+		if b&0x80 == 0 {
+			break
+		}
+	}
+	if pos > r.begin+size {
+		return ^uint64(0)
+	}
+	return r.tick + u
+}
+
+func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
+	// Read size. We've already skipped sync packets and checked
+	// bounds in peek.
+	size := uint64(r.readUint16LEAt(r.begin))
+	end = r.begin + size
+	r.begin += debugLogHeaderSize
+
+	// Read tick, nano, and p.
+	tick = r.uvarint() + r.tick
+	nano = r.uvarint() + r.nano
+	p = int(r.varint())
+
+	return
+}
+
+func (r *debugLogReader) uvarint() uint64 {
+	var u uint64
+	for i := uint(0); ; i += 7 {
+		b := r.data[r.begin%uint64(len(r.data))]
+		r.begin++
+		u |= uint64(b&^0x80) << i
+		if b&0x80 == 0 {
+			break
+		}
+	}
+	return u
+}
+
+func (r *debugLogReader) varint() int64 {
+	u := r.uvarint()
+	var v int64
+	if u&1 == 0 {
+		v = int64(u >> 1)
+	} else {
+		v = ^int64(u >> 1)
+	}
+	return v
+}
+
+func (r *debugLogReader) printVal() bool {
+	typ := r.data[r.begin%uint64(len(r.data))]
+	r.begin++
+
+	switch typ {
+	default:
+		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
+		return false
+
+	case debugLogUnknown:
+		print("<unknown kind>")
+
+	case debugLogBoolTrue:
+		print(true)
+
+	case debugLogBoolFalse:
+		print(false)
+
+	case debugLogInt:
+		print(r.varint())
+
+	case debugLogUint:
+		print(r.uvarint())
+
+	case debugLogHex, debugLogPtr:
+		print(hex(r.uvarint()))
+
+	case debugLogString:
+		sl := r.uvarint()
+		if r.begin+sl > r.end {
+			r.begin = r.end
+			print("<string length corrupted>")
+			break
+		}
+		for sl > 0 {
+			b := r.data[r.begin%uint64(len(r.data)):]
+			if uint64(len(b)) > sl {
+				b = b[:sl]
+			}
+			r.begin += uint64(len(b))
+			sl -= uint64(len(b))
+			gwrite(b)
+		}
+
+	case debugLogConstString:
+		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
+		ptr += firstmoduledata.etext
+		str := stringStruct{
+			str: unsafe.Pointer(ptr),
+			len: len,
+		}
+		s := *(*string)(unsafe.Pointer(&str))
+		print(s)
+
+	case debugLogStringOverflow:
+		print("..(", r.uvarint(), " more bytes)..")
+
+	case debugLogPC:
+		printDebugLogPC(uintptr(r.uvarint()))
+
+	case debugLogTraceback:
+		n := int(r.uvarint())
+		for i := 0; i < n; i++ {
+			print("\n\t")
+			printDebugLogPC(uintptr(r.uvarint()))
+		}
+	}
+
+	return true
+}
+
+// printDebugLog prints the debug log.
+func printDebugLog() {
+	if !dlogEnabled {
+		return
+	}
+
+	// This function should not panic or throw since it is used in
+	// the fatal panic path and this may deadlock.
+
+	printlock()
+
+	// Get the list of all debug logs.
+	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
+	all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
+
+	// Count the logs.
+	n := 0
+	for l := all; l != nil; l = l.allLink {
+		n++
+	}
+	if n == 0 {
+		printunlock()
+		return
+	}
+
+	// Prepare read state for all logs.
+	type readState struct {
+		debugLogReader
+		first    bool
+		lost     uint64
+		nextTick uint64
+	}
+	state1 := sysAlloc(unsafe.Sizeof(readState{})*uintptr(n), nil)
+	if state1 == nil {
+		println("failed to allocate read state for", n, "logs")
+		printunlock()
+		return
+	}
+	state := (*[1 << 20]readState)(state1)[:n]
+	{
+		l := all
+		for i := range state {
+			s := &state[i]
+			s.debugLogReader = l.w.r
+			s.first = true
+			s.lost = l.w.r.begin
+			s.nextTick = s.peek()
+			l = l.allLink
+		}
+	}
+
+	// Print records.
+	for {
+		// Find the next record.
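+		// This is an N-way merge: on each pass, pick the log
+		// whose next record has the smallest tick.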
+		var best struct {
+			tick uint64
+			i    int
+		}
+		best.tick = ^uint64(0)
+		for i := range state {
+			if state[i].nextTick < best.tick {
+				best.tick = state[i].nextTick
+				best.i = i
+			}
+		}
+		if best.tick == ^uint64(0) {
+			break
+		}
+
+		// Print record.
+		s := &state[best.i]
+		if s.first {
+			print(">> begin log ", best.i)
+			if s.lost != 0 {
+				print("; lost first ", s.lost>>10, "KB")
+			}
+			print(" <<\n")
+			s.first = false
+		}
+
+		end, _, nano, p := s.header()
+		oldEnd := s.end
+		s.end = end
+
+		print("[")
+		var tmpbuf [21]byte
+		pnano := int64(nano) - runtimeInitTime
+		if pnano < 0 {
+			// Logged before runtimeInitTime was set.
+			pnano = 0
+		}
+		print(string(itoaDiv(tmpbuf[:], uint64(pnano), 9)))
+		print(" P ", p, "] ")
+
+		for i := 0; s.begin < s.end; i++ {
+			if i > 0 {
+				print(" ")
+			}
+			if !s.printVal() {
+				// Abort this P log.
+				print("<aborting P log>")
+				end = oldEnd
+				break
+			}
+		}
+		println()
+
+		// Move on to the next record.
+		s.begin = end
+		s.end = oldEnd
+		s.nextTick = s.peek()
+	}
+
+	printunlock()
+}
+
+func printDebugLogPC(pc uintptr) {
+	print(hex(pc))
+	fn := findfunc(pc)
+	if !fn.valid() {
+		print(" [unknown PC]")
+	} else {
+		name := funcname(fn)
+		file, line := funcline(fn, pc)
+		print(" [", name, "+", hex(pc-fn.entry),
+			" ", file, ":", line, "]")
+	}
+}
diff --git a/src/runtime/debuglog_off.go b/src/runtime/debuglog_off.go
new file mode 100644
index 00000000000..bb3e172498e
--- /dev/null
+++ b/src/runtime/debuglog_off.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !debuglog
+
+package runtime
+
+const dlogEnabled = false
+
+type dlogPerM struct{}
+
+func getCachedDlogger() *dlogger {
+	return nil
+}
+
+func putCachedDlogger(l *dlogger) bool {
+	return false
+}
diff --git a/src/runtime/debuglog_on.go b/src/runtime/debuglog_on.go
new file mode 100644
index 00000000000..3d477e8ef5f
--- /dev/null
+++ b/src/runtime/debuglog_on.go
@@ -0,0 +1,45 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build debuglog
+
+package runtime
+
+const dlogEnabled = true
+
+// dlogPerM is the per-M debug log data. This is embedded in the m
+// struct.
+type dlogPerM struct {
+	dlogCache *dlogger
+}
+
+// getCachedDlogger returns a cached dlogger if it can do so
+// efficiently, or nil otherwise. The returned dlogger will be owned.
+func getCachedDlogger() *dlogger {
+	mp := acquirem()
+	// We don't return a cached dlogger if we're running on the
+	// signal stack in case the signal arrived while in
+	// get/putCachedDlogger. (Too bad we don't have non-atomic
+	// exchange!)
+	var l *dlogger
+	if getg() != mp.gsignal {
+		l = mp.dlogCache
+		mp.dlogCache = nil
+	}
+	releasem(mp)
+	return l
+}
+
+// putCachedDlogger attempts to return l to the local cache. It
+// returns false if this fails.
+func putCachedDlogger(l *dlogger) bool {
+	mp := acquirem()
+	if getg() != mp.gsignal && mp.dlogCache == nil {
+		mp.dlogCache = l
+		releasem(mp)
+		return true
+	}
+	releasem(mp)
+	return false
+}
diff --git a/src/runtime/debuglog_test.go b/src/runtime/debuglog_test.go
new file mode 100644
index 00000000000..2570e3565bf
--- /dev/null
+++ b/src/runtime/debuglog_test.go
@@ -0,0 +1,161 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(austin): All of these tests are skipped if the debuglog build
+// tag isn't provided. That means we basically never test debuglog.
+// There are two potential ways around this:
+//
+// 1. Make these tests re-build the runtime test with the debuglog
+// build tag and re-invoke themselves.
+//
+// 2. Always build the whole debuglog infrastructure and depend on
+// linker dead-code elimination to drop it. This is easy for dlog()
+// since there won't be any calls to it. For printDebugLog, we can
+// make panic call a wrapper that calls printDebugLog if the
+// debuglog build tag is set, or otherwise does nothing. Then tests
+// could call printDebugLog directly. This is the right answer in
+// principle, but currently our linker reads in all symbols
+// regardless, so this would slow down and bloat all links. If the
+// linker gets more efficient about this, we should revisit this
+// approach.
+
+package runtime_test
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"testing"
+)
+
+func skipDebugLog(t *testing.T) {
+	if !runtime.DlogEnabled {
+		t.Skip("debug log disabled (rebuild with -tags debuglog)")
+	}
+}
+
+func dlogCanonicalize(x string) string {
+	begin := regexp.MustCompile(`(?m)^>> begin log \d+ <<\n`)
+	x = begin.ReplaceAllString(x, "")
+	prefix := regexp.MustCompile(`(?m)^\[[^]]+\]`)
+	x = prefix.ReplaceAllString(x, "[]")
+	return x
+}
+
+func TestDebugLog(t *testing.T) {
+	skipDebugLog(t)
+	runtime.ResetDebugLog()
+	runtime.Dlog().S("testing").End()
+	got := dlogCanonicalize(runtime.DumpDebugLog())
+	if want := "[] testing\n"; got != want {
+		t.Fatalf("want %q, got %q", want, got)
+	}
+}
+
+func TestDebugLogTypes(t *testing.T) {
+	skipDebugLog(t)
+	runtime.ResetDebugLog()
+	var varString = strings.Repeat("a", 4)
+	runtime.Dlog().B(true).B(false).I(-42).I16(0x7fff).U64(^uint64(0)).Hex(0xfff).P(nil).S(varString).S("const string").End()
+	got := dlogCanonicalize(runtime.DumpDebugLog())
+	if want := "[] true false -42 32767 18446744073709551615 0xfff 0x0 aaaa const string\n"; got != want {
+		t.Fatalf("want %q, got %q", want, got)
+	}
+}
+
+func TestDebugLogSym(t *testing.T) {
+	skipDebugLog(t)
+	runtime.ResetDebugLog()
+	pc, _, _, _ := runtime.Caller(0)
+	runtime.Dlog().PC(pc).End()
+	got := dlogCanonicalize(runtime.DumpDebugLog())
+	want := regexp.MustCompile(`\[\] 0x[0-9a-f]+ \[runtime_test\.TestDebugLogSym\+0x[0-9a-f]+ .*/debuglog_test\.go:[0-9]+\]\n`)
+	if !want.MatchString(got) {
+		t.Fatalf("want matching %s, got %q", want, got)
+	}
+}
+
+func TestDebugLogInterleaving(t *testing.T) {
+	skipDebugLog(t)
+	runtime.ResetDebugLog()
+	var wg sync.WaitGroup
+	done := int32(0)
+	wg.Add(1)
+	go func() {
+		// Encourage main goroutine to move around to
+		// different Ms and Ps.
+		for atomic.LoadInt32(&done) == 0 {
+			runtime.Gosched()
+		}
+		wg.Done()
+	}()
+	var want bytes.Buffer
+	for i := 0; i < 1000; i++ {
+		runtime.Dlog().I(i).End()
+		fmt.Fprintf(&want, "[] %d\n", i)
+		runtime.Gosched()
+	}
+	atomic.StoreInt32(&done, 1)
+	wg.Wait()
+
+	gotFull := runtime.DumpDebugLog()
+	got := dlogCanonicalize(gotFull)
+	if got != want.String() {
+		// Since the timestamps are useful in understanding
+		// failures of this test, we print the uncanonicalized
+		// output.
+		t.Fatalf("want %q, got (uncanonicalized) %q", want.String(), gotFull)
+	}
+}
+
+func TestDebugLogWraparound(t *testing.T) {
+	skipDebugLog(t)
+
+	// Make sure we don't switch logs so it's easier to fill one up.
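+	// (The debug log is per-M; LockOSThread keeps this goroutine
+	// bound to a single thread, and hence a single M, so all
+	// records land in one ring buffer.)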
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	runtime.ResetDebugLog()
+	var longString = strings.Repeat("a", 128)
+	var want bytes.Buffer
+	for i, j := 0, 0; j < 2*runtime.DebugLogBytes; i, j = i+1, j+len(longString) {
+		runtime.Dlog().I(i).S(longString).End()
+		fmt.Fprintf(&want, "[] %d %s\n", i, longString)
+	}
+	log := runtime.DumpDebugLog()
+
+	// Check for "lost" message.
+	lost := regexp.MustCompile(`^>> begin log \d+; lost first \d+KB <<\n`)
+	if !lost.MatchString(log) {
+		t.Fatalf("want matching %s, got %q", lost, log)
+	}
+	idx := lost.FindStringIndex(log)
+	// Strip lost message.
+	log = dlogCanonicalize(log[idx[1]:])
+
+	// Check log.
+	if !strings.HasSuffix(want.String(), log) {
+		t.Fatalf("wrong suffix:\n%s", log)
+	}
+}
+
+func TestDebugLogLongString(t *testing.T) {
+	skipDebugLog(t)
+
+	runtime.ResetDebugLog()
+	var longString = strings.Repeat("a", runtime.DebugLogStringLimit+1)
+	runtime.Dlog().S(longString).End()
+	got := dlogCanonicalize(runtime.DumpDebugLog())
+	want := "[] " + strings.Repeat("a", runtime.DebugLogStringLimit) + " ..(1 more bytes)..\n"
+	if got != want {
+		t.Fatalf("want %q, got %q", want, got)
+	}
+}
diff --git a/src/runtime/export_debuglog_test.go b/src/runtime/export_debuglog_test.go
new file mode 100644
index 00000000000..8cd943b4382
--- /dev/null
+++ b/src/runtime/export_debuglog_test.go
@@ -0,0 +1,46 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export debuglog guts for testing.
+
+package runtime
+
+const DlogEnabled = dlogEnabled
+
+const DebugLogBytes = debugLogBytes
+
+const DebugLogStringLimit = debugLogStringLimit
+
+var Dlog = dlog
+
+func (l *dlogger) End()                     { l.end() }
+func (l *dlogger) B(x bool) *dlogger        { return l.b(x) }
+func (l *dlogger) I(x int) *dlogger         { return l.i(x) }
+func (l *dlogger) I16(x int16) *dlogger     { return l.i16(x) }
+func (l *dlogger) U64(x uint64) *dlogger    { return l.u64(x) }
+func (l *dlogger) Hex(x uint64) *dlogger    { return l.hex(x) }
+func (l *dlogger) P(x interface{}) *dlogger { return l.p(x) }
+func (l *dlogger) S(x string) *dlogger      { return l.s(x) }
+func (l *dlogger) PC(x uintptr) *dlogger    { return l.pc(x) }
+
+func DumpDebugLog() string {
+	g := getg()
+	g.writebuf = make([]byte, 0, 1<<20)
+	printDebugLog()
+	buf := g.writebuf
+	g.writebuf = nil
+
+	return string(buf)
+}
+
+func ResetDebugLog() {
+	stopTheWorld("ResetDebugLog")
+	for l := allDloggers; l != nil; l = l.allLink {
+		l.w.write = 0
+		l.w.tick, l.w.nano = 0, 0
+		l.w.r.begin, l.w.r.end = 0, 0
+		l.w.r.tick, l.w.r.nano = 0, 0
+	}
+	startTheWorld()
+}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 543fd23c01c..f39a4bc0a25 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -925,6 +925,8 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {
 		lock(&deadlock)
 	}
 
+	printDebugLog()
+
 	return docrash
 }
 
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 0dd2e929a07..cfea1cd45fe 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -468,6 +468,8 @@ type m struct {
 	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
 	vdsoPC uintptr // PC for traceback while in VDSO call
 
+	dlogPerM
+
 	mOS
 }
 
diff --git a/src/runtime/signal_sighandler.go b/src/runtime/signal_sighandler.go
index 6e71e41f521..bec46532188 100644
--- a/src/runtime/signal_sighandler.go
+++ b/src/runtime/signal_sighandler.go
@@ -148,5 +148,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 		crash()
 	}
 
+	printDebugLog()
+
 	exit(2)
 }