1
0
mirror of https://github.com/golang/go synced 2024-11-23 00:30:07 -07:00
go/src/runtime/tracestring.go

98 lines
2.4 KiB
Go
Raw Normal View History

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace string management.
package runtime
// Trace strings.

// maxTraceStringLen is the maximum length, in bytes, of a string emitted
// into the trace. Longer strings are truncated by writeString.
const maxTraceStringLen = 1024
// traceStringTable is map of string -> unique ID that also manages
// writing strings out into the trace.
type traceStringTable struct {
	// lock protects buf.
	//
	// writeString acquires it on the system stack; see the
	// go:systemstack annotation there.
	lock mutex
	buf  *traceBuf // string batches to write out to the trace.

	// tab is a mapping of string -> unique ID.
	tab traceMap
}
// put adds a string to the table, emits it, and returns a unique ID for it.
func (t *traceStringTable) put(gen uintptr, s string) uint64 {
	// Register the string in the table. added reports whether this
	// call was the one that inserted it.
	sstr := stringStructOf(&s)
	id, added := t.tab.put(sstr.str, uintptr(sstr.len))
	if !added {
		// The string was already present; its data has already been
		// (or is being) written out, so just hand back the ID.
		return id
	}
	// First sighting of this string: emit its data into the trace.
	// writeString must run on the system stack because it takes t.lock.
	systemstack(func() { t.writeString(gen, id, s) })
	return id
}
// emit emits a string and creates an ID for it, but doesn't add it to the table. Returns the ID.
func (t *traceStringTable) emit(gen uintptr, s string) uint64 {
	// Reserve a fresh ID without recording the string in the table,
	// then write the string data out to the trace buffer.
	sid := t.tab.stealID()
	// writeString must run on the system stack because it takes t.lock.
	systemstack(func() { t.writeString(gen, sid, s) })
	return sid
}
// writeString writes the string to t.buf.
//
// Must run on the systemstack because it acquires t.lock.
//
//go:systemstack
func (t *traceStringTable) writeString(gen uintptr, id uint64, s string) {
	// Truncate the string if necessary.
	if len(s) > maxTraceStringLen {
		s = s[:maxTraceStringLen]
	}

	lock(&t.lock)
	w := unsafeTraceWriter(gen, t.buf)

	// Ensure we have a place to write to. ensure may flush the current
	// batch and start a new one, in which case flushed is true.
	var flushed bool
	w, flushed = w.ensure(2 + 2*traceBytesPerNumber + len(s) /* traceEvStrings + traceEvString + ID + len + string data */)
	if flushed {
		// Annotate the batch as containing strings.
		w.byte(byte(traceEvStrings))
	}

	// Write out the string: event type, ID, length, then the raw bytes.
	w.byte(byte(traceEvString))
	w.varint(id)
	w.varint(uint64(len(s)))
	w.stringData(s)

	// Store back buf in case it was updated during ensure.
	t.buf = w.traceBuf
	unlock(&t.lock)
}
// reset clears the string table and flushes any buffers it has.
//
// Must be called only once the caller is certain nothing else will be
// added to this table.
func (t *traceStringTable) reset(gen uintptr) {
if t.buf != nil {
systemstack(func() {
lock(&trace.lock)
traceBufFlush(t.buf, gen)
unlock(&trace.lock)
})
t.buf = nil
}
// Reset the table.
runtime: rewrite traceMap to scale better The existing implementation of traceMap is a hash map with a fixed bucket table size which scales poorly with the number of elements added to the map. After a few thousands elements are in the map, it tends to fall over. Furthermore, cleaning up the trace map is currently non-preemptible, without very good reason. This change replaces the traceMap implementation with a simple append-only concurrent hash-trie. The data structure is incredibly simple and does not suffer at all from the same scaling issues. Because the traceMap no longer has a lock, and the traceRegionAlloc it embeds is not thread-safe, we have to push that lock down. While we're here, this change also makes the fast path for the traceRegionAlloc lock-free. This may not be inherently faster due to contention on the atomic add, but it creates an easy path to sharding the main allocation buffer to reduce contention in the future. (We might want to also consider a fully thread-local allocator that covers both string and stack tables. The only reason a thread-local allocator isn't feasible right now is because each of these has their own region, but we could certainly group all them together.) Change-Id: I8c06d42825c326061a1b8569e322afc4bc2a513a Reviewed-on: https://go-review.googlesource.com/c/go/+/570035 Reviewed-by: Carlos Amedee <carlos@golang.org> Auto-Submit: Michael Knyszek <mknyszek@google.com> TryBot-Bypass: Michael Knyszek <mknyszek@google.com> Reviewed-by: David Chase <drchase@google.com>
2024-03-15 16:18:06 -06:00
t.tab.reset()
}