diff --git a/src/pkg/runtime/malloc.cgo b/src/pkg/runtime/malloc.cgo
index 53411da1b19..5b43b3c9e76 100644
--- a/src/pkg/runtime/malloc.cgo
+++ b/src/pkg/runtime/malloc.cgo
@@ -48,6 +48,12 @@ mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
 		mstats.alloc += size;
 		mstats.total_alloc += size;
 		mstats.by_size[sizeclass].nmalloc++;
+
+		if(!mlookup(v, nil, nil, nil, &ref)) {
+			printf("malloc %D; mlookup failed\n", (uint64)size);
+			throw("malloc mlookup");
+		}
+		*ref = RefNone | refflag;
 	} else {
 		// TODO(rsc): Report tracebacks for very large allocations.
 
@@ -61,14 +67,10 @@ mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
 		mstats.alloc += npages<<PageShift;
 		mstats.total_alloc += npages<<PageShift;
 		v = (void*)(s->start << PageShift);
-	}
 
-	// setup for mark sweep
-	if(!mlookup(v, nil, nil, &ref)) {
-		printf("malloc %D; mlookup failed\n", (uint64)size);
-		throw("malloc mlookup");
+		// setup for mark sweep
+		s->gcref0 = RefNone | refflag;
 	}
-	*ref = RefNone | refflag;
 
 	m->mallocing = 0;
 
@@ -88,7 +90,6 @@ void
 free(void *v)
 {
 	int32 sizeclass, size;
-	uintptr page, tmp;
 	MSpan *s;
 	MCache *c;
 	uint32 *ref;
@@ -100,46 +101,34 @@ free(void *v)
 		throw("malloc/free - deadlock");
 	m->mallocing = 1;
 
-	if(!mlookup(v, nil, nil, &ref)) {
+	if(!mlookup(v, nil, nil, &s, &ref)) {
 		printf("free %p: not an allocated block\n", v);
 		throw("free mlookup");
 	}
 	*ref = RefFree;
 
 	// Find size class for v.
-	page = (uintptr)v >> PageShift;
-	sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);
+	sizeclass = s->sizeclass;
 	if(sizeclass == 0) {
-		// Missed in cache.
-		s = MHeap_Lookup(&mheap, page);
-		if(s == nil)
-			throw("free - invalid pointer");
-		sizeclass = s->sizeclass;
-		if(sizeclass == 0) {
-			// Large object.
-			mstats.alloc -= s->npages<<PageShift;
-			runtime_memclr((byte*)v, s->npages<<PageShift);
-			MHeap_Free(&mheap, s);
-			goto out;
-		}
-		MHeapMapCache_SET(&mheap.mapcache, page, sizeclass);
+		// Large object.
+		mstats.alloc -= s->npages<<PageShift;
+		runtime_memclr((byte*)v, s->npages<<PageShift);
+		MHeap_Free(&mheap, s);
+	} else {
+		// Small object.
+		c = m->mcache;
+		size = class_to_size[sizeclass];
+		if(size > sizeof(uintptr))
+			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
+		mstats.alloc -= size;
+		mstats.by_size[sizeclass].nfree++;
+		MCache_Free(c, v, sizeclass, size);
 	}
-
-	// Small object.
-	c = m->mcache;
-	size = class_to_size[sizeclass];
-	if(size > sizeof(uintptr))
-		((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
-	mstats.alloc -= size;
-	mstats.by_size[sizeclass].nfree++;
-	MCache_Free(c, v, sizeclass, size);
-
-out:
 	m->mallocing = 0;
 }
 
 int32
-mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
+mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
 {
 	uintptr n, nobj, i;
 	byte *p;
@@ -147,6 +136,8 @@ mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
 
 	mstats.nlookup++;
 	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+	if(sp)
+		*sp = s;
 	if(s == nil) {
 		if(base)
 			*base = nil;
@@ -256,7 +247,7 @@ stackalloc(uint32 n)
 		return v;
 	}
 	v = mallocgc(n, 0, 0, 0);
-	if(!mlookup(v, nil, nil, &ref))
+	if(!mlookup(v, nil, nil, nil, &ref))
 		throw("stackalloc mlookup");
 	*ref = RefStack;
 	return v;
@@ -283,7 +274,7 @@ func Free(p *byte) {
 }
 
 func Lookup(p *byte) (base *byte, size uintptr) {
-	mlookup(p, &base, &size, nil);
+	mlookup(p, &base, &size, nil, nil);
}
 
 func GC() {
@@ -306,7 +297,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
 		printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
 		goto throw;
 	}
-	if(!mlookup(obj.data, &base, &size, nil) || obj.data != base) {
+	if(!mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
 		printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
 		goto throw;
 	}
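Note on the new signature: the fifth parameter of mlookup follows the same nil-tolerant out-parameter convention the function already uses for base and size. Callers that have no use for the span pass nil, as stackalloc, runtime.Lookup, and SetFinalizer do above; free passes &s and reads the size class straight off the span. A minimal standalone sketch of the idiom, with a toy lookup and Span rather than the runtime's types:

#include <stddef.h>
#include <stdio.h>

typedef struct Span { int sizeclass; } Span;

// Toy analogue of mlookup: every out-parameter is optional,
// so each caller requests only the results it needs.
static int
lookup(void *v, void **base, Span **sp)
{
	static Span dummy = { 3 };

	if(base)
		*base = (void*)((size_t)v & ~(size_t)0xfff);	// pretend page-aligned base
	if(sp)
		*sp = &dummy;
	return 1;
}

int
main(void)
{
	void *base;
	Span *s;

	lookup((void*)0x12345, &base, &s);	// wants both, like free()
	lookup((void*)0x12345, &base, NULL);	// base only, like runtime.Lookup
	printf("base=%p sizeclass=%d\n", base, s->sizeclass);
	return 0;
}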
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index f018b6e216a..b9dea2f5e91 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -20,8 +20,6 @@
 //	MHeap: the malloc heap, managed at page (4096-byte) granularity.
 //	MSpan: a run of pages managed by the MHeap.
 //	MHeapMap: a mapping from page IDs to MSpans.
-//	MHeapMapCache: a small cache of MHeapMap mapping page IDs
-//		to size classes for pages used for small objects.
 //	MCentral: a shared free list for a given size class.
 //	MCache: a per-thread (in Go, per-M) cache for small objects.
 //	MStats: allocation statistics.
@@ -87,7 +85,6 @@ typedef struct FixAlloc	FixAlloc;
 typedef struct MCentral	MCentral;
 typedef struct MHeap	MHeap;
 typedef struct MHeapMap	MHeapMap;
-typedef struct MHeapMapCache	MHeapMapCache;
 typedef struct MSpan	MSpan;
 typedef struct MStats	MStats;
 typedef struct MLink	MLink;
@@ -296,7 +293,6 @@ struct MHeap
 
 	// span lookup
 	MHeapMap map;
-	MHeapMapCache mapcache;
 
 	// range of addresses we might see in the heap
 	byte *min;
@@ -324,7 +320,7 @@ MSpan*	MHeap_LookupMaybe(MHeap *h, PageID p);
 void	MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
 
 void*	mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
-int32	mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
+int32	mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
 void	gc(int32 force);
 
 void*	SysAlloc(uintptr);
diff --git a/src/pkg/runtime/mfinal.c b/src/pkg/runtime/mfinal.c
index 4fad6aa951c..95917472314 100644
--- a/src/pkg/runtime/mfinal.c
+++ b/src/pkg/runtime/mfinal.c
@@ -97,7 +97,7 @@ addfinalizer(void *p, void (*f)(void*), int32 nret)
 	uint32 *ref;
 	byte *base;
 
-	if(!mlookup(p, &base, nil, &ref) || p != base)
+	if(!mlookup(p, &base, nil, nil, &ref) || p != base)
 		throw("addfinalizer on invalid pointer");
 	if(f == nil) {
 		if(*ref & RefHasFinalizer) {
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 0870b3a6b09..82a8ad7e5b2 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -65,7 +65,7 @@ scanblock(int32 depth, byte *b, int64 n)
 		obj = vp[i];
 		if(obj == nil || (byte*)obj < mheap.min || (byte*)obj >= mheap.max)
 			continue;
-		if(mlookup(obj, &obj, &size, &refp)) {
+		if(mlookup(obj, &obj, &size, nil, &refp)) {
 			ref = *refp;
 			switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
 			case RefFinalize:
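Why the per-page span map must stay while the cache goes: as the mheap.c hunk below notes, the garbage collector (scanblock above) may hold a pointer into the middle of an object, and the only way to map an arbitrary interior address back to its span is to record every page of the span in the map. A standalone sketch of that page-map idea, with a flat array and toy constants standing in for the runtime's multi-level MHeapMap:

#include <stdint.h>
#include <stdio.h>

enum { PageShift = 12, NPages = 1024 };

typedef struct Span {
	uintptr_t start;	// first page id of the span
	uintptr_t npages;
} Span;

static Span *pagemap[NPages];	// page id -> owning span

// Mirrors the loop kept in MHeap_Alloc: one map entry per page,
// so interior pointers resolve no matter which page they hit.
static void
record_span(Span *s)
{
	uintptr_t n;

	for(n = 0; n < s->npages; n++)
		pagemap[s->start + n] = s;
}

static Span*
span_of(void *v)
{
	return pagemap[(uintptr_t)v >> PageShift];
}

int
main(void)
{
	Span big = { 4, 3 };	// occupies pages 4, 5, 6

	record_span(&big);
	// An interior pointer into page 6 still finds the span.
	void *p = (void*)(uintptr_t)((6u << PageShift) + 128);
	printf("span start = page %lu\n", (unsigned long)span_of(p)->start);
	return 0;
}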
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index e78c860c312..49ff3622ff0 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -108,27 +108,11 @@ HaveSpan:
 		MHeap_FreeLocked(h, t);
 	}
 
-	// If span is being used for small objects, cache size class.
-	// No matter what, cache span info, because gc needs to be
+	// Record span info, because gc needs to be
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
 	for(n=0; n<npages; n++)
 		MHeapMap_Set(&h->map, s->start+n, s);
-	if(sizeclass == 0) {
-		uintptr tmp;
-
-		// If there are entries for this span, invalidate them,
-		// but don't blow out cache entries about other spans.
-		for(n=0; n<npages; n++)
-			if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
-				MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
-	} else {
-		// Save cache entries for this span.
-		// If there's a size class, there aren't that many pages.
-		for(n=0; n<npages; n++)
-			MHeapMapCache_SET(&h->mapcache, s->start+n, sizeclass);
-	}
-
 	return s;
 }
 
diff --git a/src/pkg/runtime/mheapmap32.h b/src/pkg/runtime/mheapmap32.h
index 0a16ccd100d..cb8a830d07e 100644
--- a/src/pkg/runtime/mheapmap32.h
+++ b/src/pkg/runtime/mheapmap32.h
@@ -39,38 +39,3 @@
 MSpan*	MHeapMap_GetMaybe(MHeapMap *m, PageID k);
 void	MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
-// Much of the time, free(v) needs to know only the size class for v,
-// not which span it came from.  The MHeapMap finds the size class
-// by looking up the span.
-//
-// An MHeapMapCache is a simple direct-mapped cache translating
-// page numbers to size classes.  It avoids the expensive MHeapMap
-// lookup for hot pages.
-//
-// The cache entries are 32 bits, with the page number in the low part
-// and the value at the top.
-//
-// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
-// we can use a 16-bit cache entry by not storing the redundant 12 bits
-// of the key that are used as the entry index.  For now, keep it simple.
-enum
-{
-	MHeapMapCache_HashBits = 12
-};
-
-struct MHeapMapCache
-{
-	uint32 array[1<<MHeapMapCache_HashBits];
-};
-
-// All macros for speed (sorry).
-#define HMASK	((1<<MHeapMapCache_HashBits)-1)
-#define KBITS	MHeapMap_TotalBits
-#define KMASK	((1LL<<KBITS)-1)
-
-#define MHeapMapCache_SET(cache, key, value) \
-	((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
-
-#define MHeapMapCache_GET(cache, key, tmp) \
-	(tmp = (cache)->array[(key) & HMASK], \
-	 (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)
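For reference, the cache deleted above is a plain direct-mapped cache: each 32-bit entry packs the page-number key in the low bits and the size class above it, so a single load both validates the key and yields the value, and size class 0 (large object, never cached) doubles as the miss indicator. A self-contained sketch of the same packing, with fixed toy constants standing in for MHeapMap_TotalBits:

#include <stdint.h>
#include <stdio.h>

enum {
	HashBits = 12,	// 4096 entries, as in MHeapMapCache_HashBits
	KeyBits  = 20,	// page-number width; stands in for MHeapMap_TotalBits
};

#define HMASK ((1u << HashBits) - 1)
#define KMASK ((1u << KeyBits) - 1)

static uint32_t cache[1 << HashBits];

// Direct-mapped: a new key that hashes to an occupied slot
// simply overwrites whatever was there.
static void
cache_set(uint32_t page, uint32_t sizeclass)
{
	cache[page & HMASK] = page | (sizeclass << KeyBits);
}

// Returns the cached size class, or 0 on a miss.
static uint32_t
cache_get(uint32_t page)
{
	uint32_t e = cache[page & HMASK];

	return (e & KMASK) == page ? e >> KeyBits : 0;
}

int
main(void)
{
	cache_set(0x12345, 7);
	printf("%u\n", cache_get(0x12345));	// 7: hit
	printf("%u\n", cache_get(0x22345));	// 0: same slot, different key
	return 0;
}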
diff --git a/src/pkg/runtime/mheapmap64.h b/src/pkg/runtime/mheapmap64.h
index 127b773f749..fefeae65d6d 100644
--- a/src/pkg/runtime/mheapmap64.h
+++ b/src/pkg/runtime/mheapmap64.h
@@ -58,39 +58,3 @@
 MSpan*	MHeapMap_GetMaybe(MHeapMap *m, PageID k);
 void	MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
-// Much of the time, free(v) needs to know only the size class for v,
-// not which span it came from.  The MHeapMap finds the size class
-// by looking up the span.
-//
-// An MHeapMapCache is a simple direct-mapped cache translating
-// page numbers to size classes.  It avoids the expensive MHeapMap
-// lookup for hot pages.
-//
-// The cache entries are 64 bits, with the page number in the low part
-// and the value at the top.
-//
-// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
-// we can use a 16-bit cache entry by not storing the redundant 12 bits
-// of the key that are used as the entry index.  Here in 64-bit land,
-// that trick won't work unless the hash table has 2^28 entries.
-enum
-{
-	MHeapMapCache_HashBits = 12
-};
-
-struct MHeapMapCache
-{
-	uintptr array[1<<MHeapMapCache_HashBits];
-};
-
-// All macros for speed (sorry).
-#define HMASK	((1<<MHeapMapCache_HashBits)-1)
-#define KBITS	MHeapMap_TotalBits
-#define KMASK	((1LL<<KBITS)-1)
-
-#define MHeapMapCache_SET(cache, key, value) \
-	((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
-
-#define MHeapMapCache_GET(cache, key, tmp) \
-	(tmp = (cache)->array[(key) & HMASK], \
-	 (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)
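The NOTE(rsc) removed from both headers describes a compression that was never implemented: with 32-bit addresses, page numbers are 20 bits and the low 12 bits already select the table slot, so an entry need only store the remaining 8 key bits plus the size class, which fits in 16 bits. Spelled out under those assumptions (hypothetical layout, toy names, not runtime code):

#include <stdint.h>
#include <stdio.h>

enum {
	HashBits = 12,			// table index = low 12 key bits
	KeyBits  = 20,			// 32-bit addresses, 4KB pages
	TagBits  = KeyBits - HashBits,	// only 8 key bits actually stored
};

static uint16_t cache[1 << HashBits];

static void
cache_set(uint32_t page, uint32_t sizeclass)
{
	// Store only the key bits the slot index doesn't already encode.
	cache[page & ((1 << HashBits) - 1)] =
		(page >> HashBits) | (sizeclass << TagBits);
}

static uint32_t
cache_get(uint32_t page)
{
	uint16_t e = cache[page & ((1 << HashBits) - 1)];

	// Slot index plus stored tag together reconstruct the full key.
	return (e & ((1 << TagBits) - 1)) == page >> HashBits ? e >> TagBits : 0;
}

int
main(void)
{
	cache_set(0x12345, 7);
	printf("%u\n", cache_get(0x12345));	// 7: hit
	printf("%u\n", cache_get(0x22345));	// 0: tag mismatch
	return 0;
}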