From 66d5c9b1e9c30908608469f30b0bb72cb3014600 Mon Sep 17 00:00:00 2001
From: Dmitriy Vyukov
Date: Mon, 18 Jul 2011 14:52:57 -0400
Subject: [PATCH] runtime: add per-M caches for MemStats

Avoid touching centralized state during memory manager operations.

R=rsc
CC=golang-dev
https://golang.org/cl/4766042
---
 src/pkg/runtime/malloc.goc | 64 ++++++++++++++++++++++----------------
 src/pkg/runtime/malloc.h   | 21 ++++++++++---
 src/pkg/runtime/mcache.c   |  4 +--
 src/pkg/runtime/mgc0.c     | 29 ++++++++++++-----
 src/pkg/runtime/mheap.c    | 10 ++----
 test/mallocrep.go          |  1 +
 test/mallocrep1.go         |  2 ++
 7 files changed, 82 insertions(+), 49 deletions(-)

diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index 4274e3e162c..b9fe36db6d8 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -38,18 +38,18 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
 	if(size == 0)
 		size = 1;
 
-	mstats.nmalloc++;
+	c = m->mcache;
+	c->local_nmalloc++;
 	if(size <= MaxSmallSize) {
 		// Allocate from mcache free lists.
 		sizeclass = runtime·SizeToClass(size);
 		size = runtime·class_to_size[sizeclass];
-		c = m->mcache;
 		v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
 		if(v == nil)
 			runtime·throw("out of memory");
-		mstats.alloc += size;
-		mstats.total_alloc += size;
-		mstats.by_size[sizeclass].nmalloc++;
+		c->local_alloc += size;
+		c->local_total_alloc += size;
+		c->local_by_size[sizeclass].nmalloc++;
 	} else {
 		// TODO(rsc): Report tracebacks for very large allocations.
 
@@ -61,8 +61,8 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
 		if(s == nil)
 			runtime·throw("out of memory");
 		size = npages<<PageShift;
-		mstats.alloc += size;
-		mstats.total_alloc += size;
+		c->local_alloc += size;
+		c->local_total_alloc += size;
 		v = (void*)(s->start << PageShift);
 
 		// setup for mark sweep
@@ -128,6 +128,7 @@ runtime·free(void *v)
 
 	// Find size class for v.
 	sizeclass = s->sizeclass;
+	c = m->mcache;
 	if(sizeclass == 0) {
 		// Large object.
 		size = s->npages<<PageShift;
@@ -139,7 +140,6 @@ runtime·free(void *v)
 		runtime·MHeap_Free(&runtime·mheap, s, 1);
 	} else {
 		// Small object.
-		c = m->mcache;
 		size = runtime·class_to_size[sizeclass];
 		if(size > sizeof(uintptr))
 			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
@@ -147,10 +147,10 @@
 		// it might coalesce v and other blocks into a bigger span
 		// and change the bitmap further.
 		runtime·markfreed(v, size);
-		mstats.by_size[sizeclass].nfree++;
+		c->local_by_size[sizeclass].nfree++;
 		runtime·MCache_Free(c, v, sizeclass, size);
 	}
-	mstats.alloc -= size;
+	c->local_alloc -= size;
 	if(prof)
 		runtime·MProf_Free(v, size);
 	m->mallocing = 0;
@@ -163,7 +163,7 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
 	byte *p;
 	MSpan *s;
 
-	mstats.nlookup++;
+	m->mcache->local_nlookup++;
 	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
 	if(sp)
 		*sp = s;
@@ -192,9 +192,10 @@
 	}
 
 	n = runtime·class_to_size[s->sizeclass];
-	i = ((byte*)v - p)/n;
-	if(base)
+	if(base) {
+		i = ((byte*)v - p)/n;
 		*base = p + i*n;
+	}
 	if(size)
 		*size = n;
 
@@ -214,6 +215,29 @@ runtime·allocmcache(void)
 	return c;
 }
 
+void
+runtime·purgecachedstats(M* m)
+{
+	MCache *c;
+
+	// Protected by either heap or GC lock.
+	c = m->mcache;
+	mstats.heap_alloc += c->local_cachealloc;
+	c->local_cachealloc = 0;
+	mstats.heap_objects += c->local_objects;
+	c->local_objects = 0;
+	mstats.nmalloc += c->local_nmalloc;
+	c->local_nmalloc = 0;
+	mstats.nfree += c->local_nfree;
+	c->local_nfree = 0;
+	mstats.nlookup += c->local_nlookup;
+	c->local_nlookup = 0;
+	mstats.alloc += c->local_alloc;
+	c->local_alloc= 0;
+	mstats.total_alloc += c->local_total_alloc;
+	c->local_total_alloc= 0;
+}
+
 uintptr runtime·sizeof_C_MStats = sizeof(MStats);
 
 #define MaxArena32 (2U<<30)
@@ -361,9 +385,6 @@ func new(n uint32) (ret *uint8) {
 void*
 runtime·stackalloc(uint32 n)
 {
-	void *v;
-	uintptr sys0;
-
 	// Stackalloc must be called on scheduler stack, so that we
 	// never try to grow the stack during the code that stackalloc runs.
 	// Doing so would cause a deadlock (issue 1547).
@@ -382,11 +403,7 @@ runtime·stackalloc(uint32 n)
 			runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
 			runtime·throw("stackalloc");
 		}
-		sys0 = m->stackalloc->sys;
-		v = runtime·FixAlloc_Alloc(m->stackalloc);
-		mstats.stacks_inuse += FixedStack;
-		mstats.stacks_sys += m->stackalloc->sys - sys0;
-		return v;
+		return runtime·FixAlloc_Alloc(m->stackalloc);
 	}
 	return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
 }
@@ -394,13 +411,8 @@ runtime·stackalloc(uint32 n)
 void
 runtime·stackfree(void *v, uintptr n)
 {
-	uintptr sys0;
-
 	if(m->mallocing || m->gcing || n == FixedStack) {
-		sys0 = m->stackalloc->sys;
 		runtime·FixAlloc_Free(m->stackalloc, v);
-		mstats.stacks_inuse -= FixedStack;
-		mstats.stacks_sys += m->stackalloc->sys - sys0;
 		return;
 	}
 	runtime·free(v);
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index d8d2111cf7b..2865317ef65 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -185,10 +185,10 @@ void	runtime·FixAlloc_Free(FixAlloc *f, void *p);
 // Shared with Go: if you edit this structure, also edit extern.go.
 struct MStats
 {
-	// General statistics. No locking; approximate.
+	// General statistics.
 	uint64	alloc;		// bytes allocated and still in use
 	uint64	total_alloc;	// bytes allocated (even if freed)
-	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below)
+	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
 	uint64	nlookup;	// number of pointer lookups
 	uint64	nmalloc;	// number of mallocs
 	uint64	nfree;		// number of frees
@@ -221,7 +221,6 @@ struct MStats
 	bool	debuggc;
 
 	// Statistics about allocation size classes.
-	//	No locking; approximate.
 	struct {
 		uint32 size;
 		uint64 nmalloc;
 		uint64 nfree;
 	} by_size[NumSizeClasses];
@@ -267,9 +266,20 @@ struct MCache
 {
 	MCacheList list[NumSizeClasses];
 	uint64 size;
-	int64 local_alloc;	// bytes allocated (or freed) since last lock of heap
-	int64 local_objects;	// objects allocated (or freed) since last lock of heap
+	int64 local_cachealloc;	// bytes allocated (or freed) from cache since last lock of heap
+	int64 local_objects;	// objects allocated (or freed) from cache since last lock of heap
+	int64 local_alloc;	// bytes allocated and still in use since last lock of heap
+	int64 local_total_alloc;	// bytes allocated (even if freed) since last lock of heap
+	int64 local_nmalloc;	// number of mallocs since last lock of heap
+	int64 local_nfree;	// number of frees since last lock of heap
+	int64 local_nlookup;	// number of pointer lookups since last lock of heap
 	int32 next_sample;	// trigger heap sample after allocating this many bytes
+	// Statistics about allocation size classes since last lock of heap
+	struct {
+		int64 nmalloc;
+		int64 nfree;
+	} local_by_size[NumSizeClasses];
+
 };
 
 void*	runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
@@ -378,6 +388,7 @@ void	runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
 void	runtime·unmarkspan(void *v, uintptr size);
 bool	runtime·blockspecial(void*);
 void	runtime·setblockspecial(void*);
+void	runtime·purgecachedstats(M*);
 
 enum
 {
diff --git a/src/pkg/runtime/mcache.c b/src/pkg/runtime/mcache.c
index e406211862e..711e938fc56 100644
--- a/src/pkg/runtime/mcache.c
+++ b/src/pkg/runtime/mcache.c
@@ -48,7 +48,7 @@ runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
 			v->next = nil;
 		}
 	}
-	c->local_alloc += size;
+	c->local_cachealloc += size;
 	c->local_objects++;
 	return v;
 }
@@ -90,7 +90,7 @@ runtime·MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
 	l->list = p;
 	l->nlist++;
 	c->size += size;
-	c->local_alloc -= size;
+	c->local_cachealloc -= size;
 	c->local_objects--;
 
 	if(l->nlist >= MaxMCacheListLen) {
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index ac6a1fa40de..bc373d89090 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -484,6 +484,7 @@ sweep(void)
 			// Mark freed; restore block boundary bit.
 			*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
 
+			c = m->mcache;
 			if(s->sizeclass == 0) {
 				// Free large span.
 				runtime·unmarkspan(p, 1<<PageShift);
@@ -491,14 +492,13 @@ sweep(void)
 				runtime·MHeap_Free(&runtime·mheap, s, 1);
 			} else {
 				// Free small object.
-				c = m->mcache;
 				if(size > sizeof(uintptr))
 					((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
-				mstats.by_size[s->sizeclass].nfree++;
+				c->local_by_size[s->sizeclass].nfree++;
 				runtime·MCache_Free(c, p, s->sizeclass, size);
 			}
-			mstats.alloc -= size;
-			mstats.nfree++;
+			c->local_alloc -= size;
+			c->local_nfree++;
 		}
 	}
 }
@@ -533,14 +533,26 @@ cachestats(void)
 {
 	M *m;
 	MCache *c;
+	int32 i;
+	uint64 stacks_inuse;
+	uint64 stacks_sys;
 
+	stacks_inuse = 0;
+	stacks_sys = 0;
 	for(m=runtime·allm; m; m=m->alllink) {
+		runtime·purgecachedstats(m);
+		stacks_inuse += m->stackalloc->inuse;
+		stacks_sys += m->stackalloc->sys;
 		c = m->mcache;
-		mstats.heap_alloc += c->local_alloc;
-		c->local_alloc = 0;
-		mstats.heap_objects += c->local_objects;
-		c->local_objects = 0;
+		for(i=0; i<nelem(c->local_by_size); i++) {
+			mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
+			c->local_by_size[i].nmalloc = 0;
+			mstats.by_size[i].nfree += c->local_by_size[i].nfree;
+			c->local_by_size[i].nfree = 0;
+		}
 	}
+	mstats.stacks_inuse = stacks_inuse;
+	mstats.stacks_sys = stacks_sys;
 }
 
 void
@@ -603,6 +615,7 @@ runtime·gc(int32 force)
 	sweep();
 	t2 = runtime·nanotime();
 	stealcache();
+	cachestats();
 
 	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
 	m->gcing = 0;
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index dde31ce3457..37d50568154 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -57,10 +57,7 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
 	MSpan *s;
 
 	runtime·lock(h);
-	mstats.heap_alloc += m->mcache->local_alloc;
-	m->mcache->local_alloc = 0;
-	mstats.heap_objects += m->mcache->local_objects;
-	m->mcache->local_objects = 0;
+	runtime·purgecachedstats(m);
 	s = MHeap_AllocLocked(h, npage, sizeclass);
 	if(s != nil) {
 		mstats.heap_inuse += npage<<PageShift;
@@ -265,10 +262,7 @@
 runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
 	runtime·lock(h);
-	mstats.heap_alloc += m->mcache->local_alloc;
-	m->mcache->local_alloc = 0;
-	mstats.heap_objects += m->mcache->local_objects;
-	m->mcache->local_objects = 0;
+	runtime·purgecachedstats(m);
 	mstats.heap_inuse -= s->npages<<PageShift;
 	if(acct) {
 		mstats.heap_alloc -= s->npages<<PageShift;
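
For readers who want a standalone illustration of the pattern this patch introduces, here is a minimal sketch outside the patch, assuming plain C and pthreads; the type and function names below are illustrative, not the runtime's. Each thread accumulates counters in a private cache and folds them into the shared totals only while it already holds the central lock, which is what runtime·purgecachedstats does under the heap or GC lock and what cachestats does during garbage collection.

/*
 * Standalone sketch of per-thread stat caching (not part of the patch).
 * Compile with: cc -std=c99 sketch.c -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t nmalloc;      /* per-thread counters; no lock needed on the fast path */
	uint64_t alloc_bytes;
} LocalStats;

typedef struct {
	pthread_mutex_t lock;  /* protects the shared totals */
	uint64_t nmalloc;
	uint64_t alloc_bytes;
} GlobalStats;

static GlobalStats global = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

/* Fold a thread's cached counters into the shared totals.
 * Caller must hold global.lock (mirrors runtime·purgecachedstats,
 * which runs under the heap or GC lock). */
static void purge_cached_stats(LocalStats *local)
{
	global.nmalloc += local->nmalloc;
	local->nmalloc = 0;
	global.alloc_bytes += local->alloc_bytes;
	local->alloc_bytes = 0;
}

static void *worker(void *arg)
{
	LocalStats local = {0, 0};
	(void)arg;

	for (int i = 0; i < 1000000; i++) {
		/* Fast path: touch only thread-local state. */
		local.nmalloc++;
		local.alloc_bytes += 16;

		/* Slow path: occasionally the central lock is needed anyway
		 * (the runtime hits this when it has to go to the heap), so
		 * flush the cached counters while holding it. */
		if ((i & 0xffff) == 0) {
			pthread_mutex_lock(&global.lock);
			purge_cached_stats(&local);
			pthread_mutex_unlock(&global.lock);
		}
	}

	/* Final flush so nothing is left behind in the local cache. */
	pthread_mutex_lock(&global.lock);
	purge_cached_stats(&local);
	pthread_mutex_unlock(&global.lock);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("nmalloc=%llu bytes=%llu\n",
	       (unsigned long long)global.nmalloc,
	       (unsigned long long)global.alloc_bytes);
	return 0;
}

The payoff, as the commit message states, is that the hot allocation path touches only thread-local memory; the centralized counters are written only on slow paths where the lock is already taken.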