
runtime: rename mcache fields to match Go style

This change renames a bunch of malloc statistics stored in the mcache
that are all named with the "local_" prefix. It also renames largeAlloc
to allocLarge to prevent a naming conflict, and next_sample because it
would be the last mcache field with the old C naming style.

Change-Id: I29695cb83b397a435ede7e9ad5c3c9be72767ea3
Reviewed-on: https://go-review.googlesource.com/c/go/+/246969
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Authored by Michael Anthony Knyszek on 2020-07-24 19:58:31 +00:00; committed by Michael Knyszek
parent d677899e90
commit c863849800
7 changed files with 71 additions and 71 deletions
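
The naming conflict mentioned in the commit message comes from the Go rule that a type may not have a field and a method with the same name: once the byte counter becomes the field largeAlloc, mcache can no longer also have a largeAlloc method, so the method is renamed to allocLarge. A minimal, self-contained sketch of that rule (the cache type and sizes below are illustrative, not the runtime's declarations):

package main

import "fmt"

// cache stands in for the runtime's mcache; only the names mirror this commit.
type cache struct {
	largeAlloc uintptr // bytes allocated for large objects (new field name)
}

// Declaring this method as largeAlloc would not compile, because the type
// would then have both a field and a method named largeAlloc.
func (c *cache) allocLarge(size uintptr) {
	c.largeAlloc += size
}

func main() {
	c := &cache{}
	c.allocLarge(1 << 20)
	fmt.Println(c.largeAlloc) // 1048576
}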

View File

@ -346,18 +346,18 @@ func ReadMemStatsSlow() (base, slow MemStats) {
continue
}
// Collect large allocation stats.
- largeFree += uint64(c.local_largefree)
- slow.Frees += uint64(c.local_nlargefree)
+ largeFree += uint64(c.largeFree)
+ slow.Frees += uint64(c.largeFreeCount)
// Collect tiny allocation stats.
- tinyAllocs += uint64(c.local_tinyallocs)
+ tinyAllocs += uint64(c.tinyAllocCount)
// Collect per-sizeclass stats.
for i := 0; i < _NumSizeClasses; i++ {
- slow.Frees += uint64(c.local_nsmallfree[i])
- bySize[i].Frees += uint64(c.local_nsmallfree[i])
- bySize[i].Mallocs += uint64(c.local_nsmallfree[i])
- smallFree += uint64(c.local_nsmallfree[i]) * uint64(class_to_size[i])
+ slow.Frees += uint64(c.smallFreeCount[i])
+ bySize[i].Frees += uint64(c.smallFreeCount[i])
+ bySize[i].Mallocs += uint64(c.smallFreeCount[i])
+ smallFree += uint64(c.smallFreeCount[i]) * uint64(class_to_size[i])
}
}
slow.Frees += tinyAllocs

View File

@ -1040,7 +1040,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// The object fits into existing tiny block.
x = unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + size
- c.local_tinyallocs++
+ c.tinyAllocCount++
mp.mallocing = 0
releasem(mp)
return x
@ -1082,7 +1082,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
} else {
shouldhelpgc = true
- span = c.largeAlloc(size, needzero, noscan)
+ span = c.allocLarge(size, needzero, noscan)
span.freeindex = 1
span.allocCount = 1
x = unsafe.Pointer(span.base())
@ -1111,7 +1111,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
} else {
scanSize = typ.ptrdata
}
- c.local_scan += scanSize
+ c.scanAlloc += scanSize
}
// Ensure that the stores above that initialize x to
@ -1153,8 +1153,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
if rate := MemProfileRate; rate > 0 {
- if rate != 1 && size < c.next_sample {
- c.next_sample -= size
+ if rate != 1 && size < c.nextSample {
+ c.nextSample -= size
} else {
mp := acquirem()
profilealloc(mp, x, size)
@ -1221,7 +1221,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
throw("profilealloc called with no P")
}
}
- c.next_sample = nextSample()
+ c.nextSample = nextSample()
mProf_Malloc(x, size)
}

View File

@ -20,8 +20,8 @@ import (
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
- next_sample uintptr // trigger heap sample after allocating this many bytes
- local_scan uintptr // bytes of scannable heap allocated
+ nextSample uintptr // trigger heap sample after allocating this many bytes
+ scanAlloc uintptr // bytes of scannable heap allocated
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.go.
@ -48,13 +48,13 @@ type mcache struct {
// When read with stats from other mcaches and with the world
// stopped, the result will accurately reflect the state of the
// application.
- local_tinyallocs uintptr // number of tiny allocs not counted in other stats
- local_largealloc uintptr // bytes allocated for large objects
- local_nlargealloc uintptr // number of large object allocations
- local_nsmallalloc [_NumSizeClasses]uintptr // number of allocs for small objects
- local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
- local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
- local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
+ tinyAllocCount uintptr // number of tiny allocs not counted in other stats
+ largeAlloc uintptr // bytes allocated for large objects
+ largeAllocCount uintptr // number of large object allocations
+ smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects
+ largeFree uintptr // bytes freed for large objects (>maxSmallSize)
+ largeFreeCount uintptr // number of frees for large objects (>maxSmallSize)
+ smallFreeCount [_NumSizeClasses]uintptr // number of frees for small objects (<=maxSmallSize)
// flushGen indicates the sweepgen during which this mcache
// was last flushed. If flushGen != mheap_.sweepgen, the spans
@ -103,7 +103,7 @@ func allocmcache() *mcache {
for i := range c.alloc {
c.alloc[i] = &emptymspan
}
- c.next_sample = nextSample()
+ c.nextSample = nextSample()
return c
}
@ -134,26 +134,26 @@ func freemcache(c *mcache, recipient *mcache) {
// donate flushes data and resources which have no global
// pool to another mcache.
func (c *mcache) donate(d *mcache) {
- // local_scan is handled separately because it's not
+ // scanAlloc is handled separately because it's not
// like these stats -- it's used for GC pacing.
- d.local_largealloc += c.local_largealloc
- c.local_largealloc = 0
- d.local_nlargealloc += c.local_nlargealloc
- c.local_nlargealloc = 0
- for i := range c.local_nsmallalloc {
- d.local_nsmallalloc[i] += c.local_nsmallalloc[i]
- c.local_nsmallalloc[i] = 0
+ d.largeAlloc += c.largeAlloc
+ c.largeAlloc = 0
+ d.largeAllocCount += c.largeAllocCount
+ c.largeAllocCount = 0
+ for i := range c.smallAllocCount {
+ d.smallAllocCount[i] += c.smallAllocCount[i]
+ c.smallAllocCount[i] = 0
}
- d.local_largefree += c.local_largefree
- c.local_largefree = 0
- d.local_nlargefree += c.local_nlargefree
- c.local_nlargefree = 0
- for i := range c.local_nsmallfree {
- d.local_nsmallfree[i] += c.local_nsmallfree[i]
- c.local_nsmallfree[i] = 0
+ d.largeFree += c.largeFree
+ c.largeFree = 0
+ d.largeFreeCount += c.largeFreeCount
+ c.largeFreeCount = 0
+ for i := range c.smallFreeCount {
+ d.smallFreeCount[i] += c.smallFreeCount[i]
+ c.smallFreeCount[i] = 0
}
- d.local_tinyallocs += c.local_tinyallocs
- c.local_tinyallocs = 0
+ d.tinyAllocCount += c.tinyAllocCount
+ c.tinyAllocCount = 0
}
// refill acquires a new span of span class spc for c. This span will
@ -192,16 +192,16 @@ func (c *mcache) refill(spc spanClass) {
// Assume all objects from this span will be allocated in the
// mcache. If it gets uncached, we'll adjust this.
- c.local_nsmallalloc[spc.sizeclass()] += uintptr(s.nelems) - uintptr(s.allocCount)
+ c.smallAllocCount[spc.sizeclass()] += uintptr(s.nelems) - uintptr(s.allocCount)
// Update heap_live with the same assumption.
usedBytes := uintptr(s.allocCount) * s.elemsize
atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))
- // While we're here, flush local_scan, since we have to call
+ // While we're here, flush scanAlloc, since we have to call
// revise anyway.
- atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
- c.local_scan = 0
+ atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+ c.scanAlloc = 0
if trace.enabled {
// heap_live changed.
@ -215,8 +215,8 @@ func (c *mcache) refill(spc spanClass) {
c.alloc[spc] = s
}
- // largeAlloc allocates a span for a large object.
- func (c *mcache) largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
+ // allocLarge allocates a span for a large object.
+ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
if size+_PageSize < size {
throw("out of memory")
}
@ -235,8 +235,8 @@ func (c *mcache) largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
if s == nil {
throw("out of memory")
}
- c.local_largealloc += npages * pageSize
- c.local_nlargealloc++
+ c.largeAlloc += npages * pageSize
+ c.largeAllocCount++
// Update heap_live and revise pacing if needed.
atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
@ -257,9 +257,9 @@ func (c *mcache) largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
}
func (c *mcache) releaseAll() {
- // Take this opportunity to flush local_scan.
- atomic.Xadd64(&memstats.heap_scan, int64(c.local_scan))
- c.local_scan = 0
+ // Take this opportunity to flush scanAlloc.
+ atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+ c.scanAlloc = 0
sg := mheap_.sweepgen
for i := range c.alloc {
@ -267,7 +267,7 @@ func (c *mcache) releaseAll() {
if s != &emptymspan {
// Adjust nsmallalloc in case the span wasn't fully allocated.
n := uintptr(s.nelems) - uintptr(s.allocCount)
- c.local_nsmallalloc[spanClass(i).sizeclass()] -= n
+ c.smallAllocCount[spanClass(i).sizeclass()] -= n
if s.sweepgen != sg+1 {
// refill conservatively counted unallocated slots in heap_live.
// Undo this.

View File

@ -2086,16 +2086,16 @@ func gcMark(start_time int64) {
// Update the marked heap stat.
memstats.heap_marked = work.bytesMarked
- // Flush local_scan from each mcache since we're about to modify
- // heap_scan directly. If we were to flush this later, then local_scan
+ // Flush scanAlloc from each mcache since we're about to modify
+ // heap_scan directly. If we were to flush this later, then scanAlloc
// might have incorrect information.
for _, p := range allp {
c := p.mcache
if c == nil {
continue
}
- memstats.heap_scan += uint64(c.local_scan)
- c.local_scan = 0
+ memstats.heap_scan += uint64(c.scanAlloc)
+ c.scanAlloc = 0
}
// Update other GC heap size stats. This must happen after

View File

@ -503,7 +503,7 @@ func (s *mspan) sweep(preserve bool) bool {
// wasn't totally filled, but then swept, still has all of its
// free slots zeroed.
s.needzero = 1
- c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
+ c.smallFreeCount[spc.sizeclass()] += uintptr(nfreed)
}
if !preserve {
// The caller may not have removed this span from whatever
@ -548,8 +548,8 @@ func (s *mspan) sweep(preserve bool) bool {
} else {
mheap_.freeSpan(s)
}
- c.local_nlargefree++
- c.local_largefree += size
+ c.largeFreeCount++
+ c.largeFree += size
return true
}

View File

@ -565,25 +565,25 @@ func updatememstats() {
continue
}
// Collect large allocation stats.
- memstats.nmalloc += uint64(c.local_nlargealloc)
- totalAlloc += uint64(c.local_largealloc)
- totalFree += uint64(c.local_largefree)
- memstats.nfree += uint64(c.local_nlargefree)
+ memstats.nmalloc += uint64(c.largeAllocCount)
+ totalAlloc += uint64(c.largeAlloc)
+ totalFree += uint64(c.largeFree)
+ memstats.nfree += uint64(c.largeFreeCount)
// Collect tiny allocation stats.
- memstats.tinyallocs += uint64(c.local_tinyallocs)
+ memstats.tinyallocs += uint64(c.tinyAllocCount)
// Collect per-sizeclass stats.
for i := 0; i < _NumSizeClasses; i++ {
// Malloc stats.
- memstats.nmalloc += uint64(c.local_nsmallalloc[i])
- memstats.by_size[i].nmalloc += uint64(c.local_nsmallalloc[i])
- totalAlloc += uint64(c.local_nsmallalloc[i]) * uint64(class_to_size[i])
+ memstats.nmalloc += uint64(c.smallAllocCount[i])
+ memstats.by_size[i].nmalloc += uint64(c.smallAllocCount[i])
+ totalAlloc += uint64(c.smallAllocCount[i]) * uint64(class_to_size[i])
// Free stats.
- memstats.nfree += uint64(c.local_nsmallfree[i])
- memstats.by_size[i].nfree += uint64(c.local_nsmallfree[i])
- smallFree += uint64(c.local_nsmallfree[i]) * uint64(class_to_size[i])
+ memstats.nfree += uint64(c.smallFreeCount[i])
+ memstats.by_size[i].nfree += uint64(c.smallFreeCount[i])
+ smallFree += uint64(c.smallFreeCount[i]) * uint64(class_to_size[i])
}
}

View File

@ -70,7 +70,7 @@ func TestMemoryProfiler(t *testing.T) {
runtime.MemProfileRate = oldRate
}()
- // Allocate a meg to ensure that mcache.next_sample is updated to 1.
+ // Allocate a meg to ensure that mcache.nextSample is updated to 1.
for i := 0; i < 1024; i++ {
memSink = make([]byte, 1024)
}