runtime/metrics: add tiny allocs metric
Currently tiny allocations are not represented in runtime/metrics, but
they're represented in MemStats (indirectly) via Mallocs. Add them to
runtime/metrics by first merging memstats.tinyallocs into
consistentHeapStats (just for simplicity; it's monotonic so metrics
would still be self-consistent if we just read it atomically) and then
adding /gc/heap/tiny/allocs:objects to the list of supported metrics.

Change-Id: Ie478006ab942a3e877b4a79065ffa43569722f3d
Reviewed-on: https://go-review.googlesource.com/c/go/+/312909
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Parent: 7d22c2181b
Commit: 0b9ca4d907
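For context, here is a minimal sketch (not part of the patch) of how a program could read the new counter through the public runtime/metrics API; the metric name comes from this change, the rest is the standard sampling pattern:

	package main

	import (
		"fmt"
		"runtime/metrics"
	)

	func main() {
		// Sample the new tiny-allocation counter by name.
		s := []metrics.Sample{{Name: "/gc/heap/tiny/allocs:objects"}}
		metrics.Read(s)
		// Any other kind (e.g. KindBad) would mean the metric is
		// unsupported on this toolchain.
		if s[0].Value.Kind() == metrics.KindUint64 {
			fmt.Println("tiny allocs:", s[0].Value.Uint64())
		}
	}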
src/runtime/export_test.go
@@ -393,7 +393,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
 			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
 		}
-		slow.Frees += memstats.tinyallocs + uint64(m.largeFreeCount)
+		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
 		slow.Mallocs += slow.Frees
 
 		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree

src/runtime/mcache.go
@@ -176,18 +176,18 @@ func (c *mcache) refill(spc spanClass) {
 	// mcache. If it gets uncached, we'll adjust this.
 	stats := memstats.heapStats.acquire()
 	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
+
+	// Flush tinyAllocs.
+	if spc == tinySpanClass {
+		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+		c.tinyAllocs = 0
+	}
 	memstats.heapStats.release()
 
 	// Update gcController.heapLive with the same assumption.
 	usedBytes := uintptr(s.allocCount) * s.elemsize
 	atomic.Xadd64(&gcController.heapLive, int64(s.npages*pageSize)-int64(usedBytes))
 
-	// Flush tinyAllocs.
-	if spc == tinySpanClass {
-		atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
-		c.tinyAllocs = 0
-	}
-
 	// While we're here, flush scanAlloc, since we have to call
 	// revise anyway.
 	atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))

@@ -280,8 +280,12 @@ func (c *mcache) releaseAll() {
 	// Clear tinyalloc pool.
 	c.tiny = 0
 	c.tinyoffset = 0
-	atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
+
+	// Flush tinyAllocs.
+	stats := memstats.heapStats.acquire()
+	atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
 	c.tinyAllocs = 0
+	memstats.heapStats.release()
 
 	// Updated heapScan and possible gcController.heapLive.
 	if gcBlackenEnabled != 0 {

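Note the shape of the change in these two mcache.go hunks: the tiny-alloc flush moves from a bare atomic counter (atomic.Xadd64 on memstats.tinyallocs) into the consistentHeapStats acquire/release window, so the delta is published in the same epoch as the other allocator stats. Per the commit message this is mainly for simplicity: the counter is monotonic, so metrics would stay self-consistent even if it were read atomically on its own.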
src/runtime/metrics.go
@@ -124,6 +124,13 @@ func initMetrics() {
 			out.scalar = in.heapStats.numObjects
 		},
 	},
+	"/gc/heap/tiny/allocs:objects": {
+		deps: makeStatDepSet(heapStatsDep),
+		compute: func(in *statAggregate, out *metricValue) {
+			out.kind = metricKindUint64
+			out.scalar = uint64(in.heapStats.tinyAllocCount)
+		},
+	},
 	"/gc/pauses:seconds": {
 		compute: func(_ *statAggregate, out *metricValue) {
 			hist := out.float64HistOrInit(timeHistBuckets)

src/runtime/metrics/description.go
@@ -91,6 +91,16 @@ var allDesc = []Description{
 		Description: "Number of objects, live or unswept, occupying heap memory.",
 		Kind:        KindUint64,
 	},
+	{
+		Name: "/gc/heap/tiny/allocs:objects",
+		Description: "Count of small allocations that are packed together into blocks. " +
+			"These allocations are counted separately from other allocations " +
+			"because each individual allocation is not tracked by the runtime, " +
+			"only their block. Each block is already accounted for in " +
+			"allocs-by-size and frees-by-size.",
+		Kind:       KindUint64,
+		Cumulative: true,
+	},
 	{
 		Name:        "/gc/pauses:seconds",
 		Description: "Distribution individual GC-related stop-the-world pause latencies.",

src/runtime/metrics/doc.go
@@ -72,6 +72,13 @@ Below is the full list of supported metrics, ordered lexicographically.
 	/gc/heap/objects:objects
 		Number of objects, live or unswept, occupying heap memory.
 
+	/gc/heap/tiny/allocs:objects
+		Count of small allocations that are packed together into blocks.
+		These allocations are counted separately from other allocations
+		because each individual allocation is not tracked by the runtime,
+		only their block. Each block is already accounted for in
+		allocs-by-size and frees-by-size.
+
 	/gc/pauses:seconds
 		Distribution individual GC-related stop-the-world pause latencies.
 

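The wording above ("each block is already accounted for in allocs-by-size and frees-by-size") implies an identity that the test changes below rely on: the tiny count plus the sum of the allocs-by-size bucket counts should equal MemStats.Mallocs. A sketch of a consumer checking that, assuming the existing Go 1.16 metrics API (not part of this patch):

	samples := []metrics.Sample{
		{Name: "/gc/heap/tiny/allocs:objects"},
		{Name: "/gc/heap/allocs-by-size:bytes"},
	}
	metrics.Read(samples)
	total := samples[0].Value.Uint64() // tiny allocations
	for _, c := range samples[1].Value.Float64Histogram().Counts {
		total += c // non-tiny allocations, bucketed by size
	}
	// total should now line up with runtime.MemStats.Mallocs.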
src/runtime/metrics_test.go
@@ -40,6 +40,8 @@ func TestReadMetrics(t *testing.T) {
 	}
 
 	// Check to make sure the values we read line up with other values we read.
+	var allocsBySize *metrics.Float64Histogram
+	var tinyAllocs uint64
 	for i := range samples {
 		switch name := samples[i].Name; name {
 		case "/memory/classes/heap/free:bytes":

@@ -84,6 +86,7 @@ func TestReadMetrics(t *testing.T) {
 					t.Errorf("histogram counts do not much BySize for class %d: got %d, want %d", i, c, m)
 				}
 			}
+			allocsBySize = hist
 		case "/gc/heap/frees-by-size:bytes":
 			hist := samples[i].Value.Float64Histogram()
 			// Skip size class 0 in BySize, because it's always empty and not represented

@@ -95,9 +98,20 @@ func TestReadMetrics(t *testing.T) {
 					continue
 				}
 				if c, f := hist.Counts[i], sc.Frees; c != f {
-					t.Errorf("histogram counts do not much BySize for class %d: got %d, want %d", i, c, f)
+					t.Errorf("histogram counts do not match BySize for class %d: got %d, want %d", i, c, f)
 				}
 			}
+		case "/gc/heap/tiny/allocs:objects":
+			// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+			// The reason for this is because MemStats couldn't be extended at the time
+			// but there was a desire to have Mallocs at least be a little more representative,
+			// while having Mallocs - Frees still represent a live object count.
+			// Unfortunately, MemStats doesn't actually export a large allocation count,
+			// so it's impossible to pull this number out directly.
+			//
+			// Check tiny allocation count outside of this loop, by using the allocs-by-size
+			// histogram in order to figure out how many large objects there are.
+			tinyAllocs = samples[i].Value.Uint64()
 		case "/gc/heap/objects:objects":
 			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapObjects)
 		case "/gc/heap/goal:bytes":

|
|||||||
checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC))
|
checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check tinyAllocs.
|
||||||
|
nonTinyAllocs := uint64(0)
|
||||||
|
for _, c := range allocsBySize.Counts {
|
||||||
|
nonTinyAllocs += c
|
||||||
|
}
|
||||||
|
checkUint64(t, "/gc/heap/tiny/allocs:objects", tinyAllocs, mstats.Mallocs-nonTinyAllocs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadMetricsConsistency(t *testing.T) {
|
func TestReadMetricsConsistency(t *testing.T) {
|
||||||
|
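To make the invariant behind the new checkUint64 call concrete: MemStats inflates Mallocs and Frees by the same tiny count, so their difference still counts live objects, and the tiny count can be recovered by subtraction. With hypothetical numbers:

	nonTiny := uint64(1000)   // sum of allocsBySize.Counts
	tiny := uint64(200)       // true tiny allocation count
	mallocs := nonTiny + tiny // 1200, what MemStats.Mallocs reports
	// The test recovers tiny as mallocs - nonTiny == 200; since Frees
	// is inflated by the same 200, Mallocs - Frees is unaffected.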
src/runtime/mstats.go
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"runtime/internal/atomic"
+	"runtime/internal/sys"
 	"unsafe"
 )
 

@@ -86,7 +87,6 @@ type mstats struct {
 	_ [1 - _NumSizeClasses%2]uint32
 
 	last_gc_nanotime uint64 // last gc (monotonic time)
-	tinyallocs       uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly
 	last_heap_inuse  uint64 // heap_inuse at mark termination of the previous GC
 
 	// heapStats is a set of statistics

|
|||||||
}
|
}
|
||||||
|
|
||||||
// Account for tiny allocations.
|
// Account for tiny allocations.
|
||||||
memstats.nfree += memstats.tinyallocs
|
memstats.nfree += uint64(consStats.tinyAllocCount)
|
||||||
memstats.nmalloc += memstats.tinyallocs
|
memstats.nmalloc += uint64(consStats.tinyAllocCount)
|
||||||
|
|
||||||
// Calculate derived stats.
|
// Calculate derived stats.
|
||||||
memstats.total_alloc = totalAlloc
|
memstats.total_alloc = totalAlloc
|
||||||
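(consStats here is the heapStatsDelta snapshot that updatememstats() reads out of memstats.heapStats earlier in the function, so the tiny count now comes from the same consistent snapshot as the rest of the allocator stats.)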
@@ -703,6 +703,7 @@ type heapStatsDelta struct {
 	inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits
 
 	// Allocator stats.
+	tinyAllocCount  uintptr                  // number of tiny allocations
 	largeAlloc      uintptr                  // bytes allocated for large objects
 	largeAllocCount uintptr                  // number of large object allocations
 	smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects

@@ -712,7 +713,7 @@ type heapStatsDelta struct {
 
 	// Add a uint32 to ensure this struct is a multiple of 8 bytes in size.
 	// Only necessary on 32-bit platforms.
-	// _ [(sys.PtrSize / 4) % 2]uint32
+	_ [(sys.PtrSize / 4) % 2]uint32
 }
 
 // merge adds in the deltas from b into a.

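Un-commenting this pad field is also what the new "runtime/internal/sys" import at the top of the file is for: on 32-bit platforms sys.PtrSize is 4, so (4/4)%2 == 1 and the struct gains one uint32 of padding to stay a multiple of 8 bytes; on 64-bit platforms (8/4)%2 == 0 and the array is zero-length.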
@@ -724,6 +725,7 @@ func (a *heapStatsDelta) merge(b *heapStatsDelta) {
 	a.inWorkBufs += b.inWorkBufs
 	a.inPtrScalarBits += b.inPtrScalarBits
 
+	a.tinyAllocCount += b.tinyAllocCount
 	a.largeAlloc += b.largeAlloc
 	a.largeAllocCount += b.largeAllocCount
 	for i := range b.smallAllocCount {