runtime: convert timeHistogram to atomic types
I've dropped the note that sched.timeToRun is protected by sched.lock,
as it does not seem to be true.

For #53821.

Change-Id: I03f8dc6ca0bcd4ccf3ec113010a0aa39c6f7d6ef
Reviewed-on: https://go-review.googlesource.com/c/go/+/419449
Reviewed-by: Austin Clements <austin@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Pratt <mpratt@google.com>
commit b04e4637db
parent 09cc9bac72
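In short, the change replaces free atomic functions operating on plain uint64 fields with methods on atomic wrapper types. A minimal before/after sketch of the pattern, written against the public sync/atomic package (the runtime itself uses its internal runtime/internal/atomic):

package main

import (
	"fmt"
	"sync/atomic"
)

// Before: a plain uint64 updated through free functions (the runtime
// uses runtime/internal/atomic.Xadd64; sync/atomic.AddUint64 is the
// public analog). Alignment must be maintained by hand on 32-bit.
type oldHist struct {
	underflow uint64
}

func (h *oldHist) recordUnderflow() { atomic.AddUint64(&h.underflow, 1) }

// After: an atomic.Uint64 field. The operations become methods and the
// type itself guarantees 8-byte alignment.
type newHist struct {
	underflow atomic.Uint64
}

func (h *newHist) recordUnderflow() { h.underflow.Add(1) }

func main() {
	var h newHist
	h.recordUnderflow()
	fmt.Println(h.underflow.Load()) // 1
}

Besides the tidier call sites, the wrapper type carries its own 8-byte alignment guarantee, which is what lets the diff below delete several hand-written alignment checks.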
src/runtime/align_runtime_test.go

@@ -17,8 +17,6 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(p{}.timer0When),
 	unsafe.Offsetof(p{}.timerModifiedEarliest),
 	unsafe.Offsetof(p{}.gcFractionalMarkTime),
-	unsafe.Offsetof(schedt{}.timeToRun),
-	unsafe.Offsetof(timeHistogram{}.underflow),
 	unsafe.Offsetof(profBuf{}.overflow),
 	unsafe.Offsetof(profBuf{}.overflowTime),
 	unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount),
@@ -37,10 +35,8 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(lfnode{}.next),
 	unsafe.Offsetof(mstats{}.last_gc_nanotime),
 	unsafe.Offsetof(mstats{}.last_gc_unix),
-	unsafe.Offsetof(mstats{}.gcPauseDist),
 	unsafe.Offsetof(ticksType{}.val),
 	unsafe.Offsetof(workType{}.bytesMarked),
-	unsafe.Offsetof(timeHistogram{}.counts),
 }

 // AtomicVariables is the set of global variables on which we perform
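The two hunks above remove entries from AtomicFields, the list of offsets the runtime's alignment test checks for 8-byte alignment; fields converted to atomic.Uint64 no longer need to appear there. A standalone sketch, with hypothetical names, of the invariant such a test enforces:

package main

import (
	"fmt"
	"unsafe"
)

// Hypothetical struct: the bool would push n off its natural alignment
// unless padding is inserted, which is exactly what an offset test like
// this catches.
type counters struct {
	enabled bool
	_       [7]byte // manual padding keeping n 8-byte aligned
	n       uint64
}

func main() {
	off := unsafe.Offsetof(counters{}.n)
	fmt.Println(off, off%8 == 0) // 8 true
}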
src/runtime/export_test.go

@@ -1244,9 +1244,9 @@ func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
 	t := (*timeHistogram)(th)
 	i := bucket*TimeHistNumSubBuckets + subBucket
 	if i >= uint(len(t.counts)) {
-		return t.underflow, false
+		return t.underflow.Load(), false
 	}
-	return t.counts[i], true
+	return t.counts[i].Load(), true
 }

 func (th *TimeHistogram) Record(duration int64) {
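Count flattens the (bucket, subBucket) pair into a single index into counts, falling back to the underflow counter when the index is out of range; only the loads change in this commit. A self-contained toy with the same shape (names and sizes here are made up):

package main

import (
	"fmt"
	"sync/atomic"
)

const numSubBuckets = 16

// A toy accessor mirroring TimeHistogram.Count: 2-D (bucket, sub)
// flattened to one index, with out-of-range reads answered by the
// underflow counter.
type hist struct {
	counts    [4 * numSubBuckets]atomic.Uint64
	underflow atomic.Uint64
}

func (h *hist) count(bucket, sub uint) (uint64, bool) {
	i := bucket*numSubBuckets + sub
	if i >= uint(len(h.counts)) {
		return h.underflow.Load(), false
	}
	return h.counts[i].Load(), true
}

func main() {
	var h hist
	h.counts[17].Add(3)
	fmt.Println(h.count(1, 1)) // 3 true
}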
src/runtime/histogram.go

@@ -66,18 +66,16 @@ const (
 // It is an HDR histogram with exponentially-distributed
 // buckets and linearly distributed sub-buckets.
 //
-// Counts in the histogram are updated atomically, so it is safe
-// for concurrent use. It is also safe to read all the values
-// atomically.
+// The histogram is safe for concurrent reads and writes.
 type timeHistogram struct {
-	counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+	counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]atomic.Uint64

 	// underflow counts all the times we got a negative duration
 	// sample. Because of how time works on some platforms, it's
 	// possible to measure negative durations. We could ignore them,
 	// but we record them anyway because it's better to have some
 	// signal that it's happening than just missing samples.
-	underflow uint64
+	underflow atomic.Uint64
 }

 // record adds the given duration to the distribution.
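Storing the counters as atomic.Uint64 is what makes the simplified doc comment true: any number of goroutines can record into the histogram without a lock. A quick standalone demonstration, using sync/atomic in place of the runtime's internal package:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// A miniature counts array; concurrent increments need no mutex.
type hist struct {
	counts [8]atomic.Uint64
}

func main() {
	var h hist
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				h.counts[j%8].Add(1)
			}
		}()
	}
	wg.Wait()
	total := uint64(0)
	for i := range h.counts {
		total += h.counts[i].Load()
	}
	fmt.Println(total) // 4000
}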
@@ -88,7 +86,7 @@ type timeHistogram struct {
 //go:nosplit
 func (h *timeHistogram) record(duration int64) {
 	if duration < 0 {
-		atomic.Xadd64(&h.underflow, 1)
+		h.underflow.Add(1)
 		return
 	}
 	// The index of the exponential bucket is just the index
@@ -116,7 +114,7 @@ func (h *timeHistogram) record(duration int64) {
 	} else {
 		subBucket = uint(duration)
 	}
-	atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
+	h.counts[superBucket*timeHistNumSubBuckets+subBucket].Add(1)
 }

 const (
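record derives an exponential super-bucket from the bit length of the duration and a linear sub-bucket from the bits just below the leading one, then bumps a single atomic counter. A toy version of that indexing, assuming 4 sub-bucket bits for illustration (histogram.go's real constants and overflow handling differ in detail):

package main

import (
	"fmt"
	"math/bits"
)

const (
	subBucketBits = 4
	numSubBuckets = 1 << subBucketBits // 16 linear sub-buckets
)

func bucketOf(d uint64) (super, sub uint) {
	if d < numSubBuckets {
		// Small durations are counted exactly in super-bucket 0.
		return 0, uint(d)
	}
	// The exponential bucket comes from the bit length; the linear
	// sub-bucket is the 4 bits just below the leading 1.
	super = uint(bits.Len64(d)) - subBucketBits
	sub = uint(d>>(super-1)) & (numSubBuckets - 1)
	return super, sub
}

func main() {
	for _, d := range []uint64{3, 15, 16, 31, 32, 100} {
		super, sub := bucketOf(d)
		fmt.Println(d, "->", super, sub)
	}
}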
src/runtime/metrics.go

@@ -7,7 +7,6 @@ package runtime
 // Metrics implementation exported to runtime/metrics.

 import (
-	"runtime/internal/atomic"
 	"unsafe"
 )

@@ -197,9 +196,9 @@ func initMetrics() {
 			// The bottom-most bucket, containing negative values, is tracked
 			// as a separately as underflow, so fill that in manually and then
 			// iterate over the rest.
-			hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+			hist.counts[0] = memstats.gcPauseDist.underflow.Load()
 			for i := range memstats.gcPauseDist.counts {
-				hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+				hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
 			}
 		},
 	},
@@ -327,9 +326,9 @@ func initMetrics() {
 	"/sched/latencies:seconds": {
 		compute: func(_ *statAggregate, out *metricValue) {
 			hist := out.float64HistOrInit(timeHistBuckets)
-			hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
+			hist.counts[0] = sched.timeToRun.underflow.Load()
 			for i := range sched.timeToRun.counts {
-				hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
+				hist.counts[i+1] = sched.timeToRun.counts[i].Load()
 			}
 		},
 	},
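These compute functions are the runtime-side producers of the public metrics; user code can watch the same scheduling-latency histogram through the runtime/metrics API:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Sample the scheduling-latency distribution fed by sched.timeToRun.
	s := []metrics.Sample{{Name: "/sched/latencies:seconds"}}
	metrics.Read(s)
	h := s[0].Value.Float64Histogram()
	fmt.Println("buckets:", len(h.Buckets), "counts:", len(h.Counts))
}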
src/runtime/mstats.go

@@ -334,10 +334,6 @@ func init() {
 		println(offset)
 		throw("memstats.heapStats not aligned to 8 bytes")
 	}
-	if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
-		println(offset)
-		throw("memstats.gcPauseDist not aligned to 8 bytes")
-	}
 	// Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
 	// [3]heapStatsDelta) to be 8-byte aligned.
 	if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
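The deleted init-time check becomes redundant because atomic.Uint64 guarantees its own alignment: as I read it, the type embeds an align64 marker that the compiler special-cases, so enclosing structs keep the field 8-byte aligned even on 32-bit platforms. The effect is observable directly:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// The bool would normally let a plain uint64 field land at offset 4 on
// 32-bit platforms; with atomic.Uint64 the compiler pads it to offset 8.
type s struct {
	flag bool
	n    atomic.Uint64
}

func main() {
	fmt.Println(unsafe.Offsetof(s{}.n)) // 8, on 32- and 64-bit alike
}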
src/runtime/proc.go

@@ -703,11 +703,6 @@ func schedinit() {
 	sigsave(&gp.m.sigmask)
 	initSigmask = gp.m.sigmask

-	if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
-		println(offset)
-		throw("sched.timeToRun not aligned to 8 bytes")
-	}
-
 	goargs()
 	goenvs()
 	parsedebugvars()
src/runtime/runtime2.go

@@ -843,8 +843,6 @@ type schedt struct {
 	// timeToRun is a distribution of scheduling latencies, defined
 	// as the sum of time a G spends in the _Grunnable state before
 	// it transitions to _Grunning.
-	//
-	// timeToRun is protected by sched.lock.
 	timeToRun timeHistogram
 }

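timeToRun accumulates, per G, how long it sat runnable before being scheduled. A hypothetical, heavily simplified sketch of how such a histogram gets fed; the field names here are invented, not the runtime's:

package main

import (
	"fmt"
	"time"
)

// Invented stand-in for a goroutine descriptor: stamp the moment it
// becomes runnable, accumulate the delta when it starts running.
type g struct {
	runnableStamp time.Time     // set on the transition into _Grunnable
	runnableTime  time.Duration // accumulated time spent runnable
}

func main() {
	gp := &g{runnableStamp: time.Now()}
	time.Sleep(10 * time.Millisecond) // sits in a run queue
	// Transition runnable -> running: accumulate, then this value
	// would be recorded into a histogram like sched.timeToRun.
	gp.runnableTime += time.Since(gp.runnableStamp)
	fmt.Println("would record:", gp.runnableTime)
}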