runtime: delete MHeapMapCache, which is useless
because free needs to mark the block as freed to coordinate with the
garbage collector.  (In C++, free can blindly put the block on the free
list, no questions asked, so the cache saves some work.)

R=iant
CC=golang-dev
https://golang.org/cl/206069
commit 22a7f2a14d (parent fc8e3d4004)
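For context, below is a minimal sketch, in plain standard C rather than the runtime's own C dialect, of the kind of direct-mapped page-number-to-size-class cache this change deletes. The constants, the cache_set/cache_get names, and the main() exercise are illustrative assumptions, not taken from the runtime; only the packing scheme mirrors the removed MHeapMapCache macros. The point of the change: free() used such a cache to skip the MHeapMap span lookup, but once free() must find the span anyway to mark the block's ref word for the collector, a cached size class no longer saves that lookup.

/*
 * Illustrative only: a direct-mapped cache from page number to size class,
 * modeled loosely on the removed MHeapMapCache macros.  Each slot packs the
 * page number in the low KeyBits and the cached value above them, so a
 * lookup can verify that the slot really belongs to the requested page.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	HashBits = 12,   /* 1<<12 slots, like MHeapMapCache_HashBits */
	KeyBits  = 20    /* assumed page-number width for this sketch */
};

#define HMASK ((1u << HashBits) - 1)
#define KMASK ((1ull << KeyBits) - 1)

static uint64_t cache[1 << HashBits];

/* Remember the size class for a page. */
static void
cache_set(uint64_t page, uint64_t sizeclass)
{
	cache[page & HMASK] = page | (sizeclass << KeyBits);
}

/* Return the cached size class, or 0 on a miss. */
static uint64_t
cache_get(uint64_t page)
{
	uint64_t e = cache[page & HMASK];
	return (e & KMASK) == page ? e >> KeyBits : 0;
}

int
main(void)
{
	uint64_t page = 0x1234;            /* hypothetical page number */

	cache_set(page, 5);                /* pretend this page holds size class 5 */
	printf("hit: %d\n", (int)cache_get(page));        /* prints 5 */
	printf("miss: %d\n", (int)cache_get(page + 1));   /* prints 0: empty slot */
	return 0;
}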
@@ -48,6 +48,12 @@ mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
 		mstats.alloc += size;
 		mstats.total_alloc += size;
 		mstats.by_size[sizeclass].nmalloc++;
+
+		if(!mlookup(v, nil, nil, nil, &ref)) {
+			printf("malloc %D; mlookup failed\n", (uint64)size);
+			throw("malloc mlookup");
+		}
+		*ref = RefNone | refflag;
 	} else {
 		// TODO(rsc): Report tracebacks for very large allocations.
 
@@ -61,14 +67,10 @@ mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
 		mstats.alloc += npages<<PageShift;
 		mstats.total_alloc += npages<<PageShift;
 		v = (void*)(s->start << PageShift);
-	}
 
-	// setup for mark sweep
-	if(!mlookup(v, nil, nil, &ref)) {
-		printf("malloc %D; mlookup failed\n", (uint64)size);
-		throw("malloc mlookup");
+		// setup for mark sweep
+		s->gcref0 = RefNone | refflag;
 	}
-	*ref = RefNone | refflag;
 
 	m->mallocing = 0;
 
@@ -88,7 +90,6 @@ void
 free(void *v)
 {
 	int32 sizeclass, size;
-	uintptr page, tmp;
 	MSpan *s;
 	MCache *c;
 	uint32 *ref;
@@ -100,46 +101,34 @@ free(void *v)
 	throw("malloc/free - deadlock");
 	m->mallocing = 1;
 
-	if(!mlookup(v, nil, nil, &ref)) {
+	if(!mlookup(v, nil, nil, &s, &ref)) {
 		printf("free %p: not an allocated block\n", v);
 		throw("free mlookup");
 	}
 	*ref = RefFree;
 
 	// Find size class for v.
-	page = (uintptr)v >> PageShift;
-	sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);
+	sizeclass = s->sizeclass;
 	if(sizeclass == 0) {
-		// Missed in cache.
-		s = MHeap_Lookup(&mheap, page);
-		if(s == nil)
-			throw("free - invalid pointer");
-		sizeclass = s->sizeclass;
-		if(sizeclass == 0) {
-			// Large object.
-			mstats.alloc -= s->npages<<PageShift;
-			runtime_memclr(v, s->npages<<PageShift);
-			MHeap_Free(&mheap, s);
-			goto out;
-		}
-		MHeapMapCache_SET(&mheap.mapcache, page, sizeclass);
+		// Large object.
+		mstats.alloc -= s->npages<<PageShift;
+		runtime_memclr(v, s->npages<<PageShift);
+		MHeap_Free(&mheap, s);
+	} else {
+		// Small object.
+		c = m->mcache;
+		size = class_to_size[sizeclass];
+		if(size > sizeof(uintptr))
+			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
+		mstats.alloc -= size;
+		mstats.by_size[sizeclass].nfree++;
+		MCache_Free(c, v, sizeclass, size);
 	}
-
-	// Small object.
-	c = m->mcache;
-	size = class_to_size[sizeclass];
-	if(size > sizeof(uintptr))
-		((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
-	mstats.alloc -= size;
-	mstats.by_size[sizeclass].nfree++;
-	MCache_Free(c, v, sizeclass, size);
-
-out:
 	m->mallocing = 0;
 }
 
 int32
-mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
+mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
 {
 	uintptr n, nobj, i;
 	byte *p;
@@ -147,6 +136,8 @@ mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
 
 	mstats.nlookup++;
 	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+	if(sp)
+		*sp = s;
 	if(s == nil) {
 		if(base)
 			*base = nil;
@@ -256,7 +247,7 @@ stackalloc(uint32 n)
 		return v;
 	}
 	v = mallocgc(n, 0, 0, 0);
-	if(!mlookup(v, nil, nil, &ref))
+	if(!mlookup(v, nil, nil, nil, &ref))
 		throw("stackalloc mlookup");
 	*ref = RefStack;
 	return v;
@@ -283,7 +274,7 @@ func Free(p *byte) {
 }
 
 func Lookup(p *byte) (base *byte, size uintptr) {
-	mlookup(p, &base, &size, nil);
+	mlookup(p, &base, &size, nil, nil);
 }
 
 func GC() {
@@ -306,7 +297,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
 		printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
 		goto throw;
 	}
-	if(!mlookup(obj.data, &base, &size, nil) || obj.data != base) {
+	if(!mlookup(obj.data, &base, &size, nil, nil) || obj.data != base) {
 		printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
 		goto throw;
 	}
@@ -20,8 +20,6 @@
 // MHeap: the malloc heap, managed at page (4096-byte) granularity.
 // MSpan: a run of pages managed by the MHeap.
 // MHeapMap: a mapping from page IDs to MSpans.
-// MHeapMapCache: a small cache of MHeapMap mapping page IDs
-//	to size classes for pages used for small objects.
 // MCentral: a shared free list for a given size class.
 // MCache: a per-thread (in Go, per-M) cache for small objects.
 // MStats: allocation statistics.
@@ -87,7 +85,6 @@ typedef struct FixAlloc FixAlloc;
 typedef struct MCentral MCentral;
 typedef struct MHeap MHeap;
 typedef struct MHeapMap MHeapMap;
-typedef struct MHeapMapCache MHeapMapCache;
 typedef struct MSpan MSpan;
 typedef struct MStats MStats;
 typedef struct MLink MLink;
@@ -296,7 +293,6 @@ struct MHeap
 
 	// span lookup
 	MHeapMap map;
-	MHeapMapCache mapcache;
 
 	// range of addresses we might see in the heap
 	byte *min;
@@ -324,7 +320,7 @@ MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
 void MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
 
 void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
-int32 mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
+int32 mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
 void gc(int32 force);
 
 void* SysAlloc(uintptr);
@@ -97,7 +97,7 @@ addfinalizer(void *p, void (*f)(void*), int32 nret)
 	uint32 *ref;
 	byte *base;
 
-	if(!mlookup(p, &base, nil, &ref) || p != base)
+	if(!mlookup(p, &base, nil, nil, &ref) || p != base)
 		throw("addfinalizer on invalid pointer");
 	if(f == nil) {
 		if(*ref & RefHasFinalizer) {
@@ -65,7 +65,7 @@ scanblock(int32 depth, byte *b, int64 n)
 		obj = vp[i];
 		if(obj == nil || (byte*)obj < mheap.min || (byte*)obj >= mheap.max)
 			continue;
-		if(mlookup(obj, &obj, &size, &refp)) {
+		if(mlookup(obj, &obj, &size, nil, &refp)) {
 			ref = *refp;
 			switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
 			case RefFinalize:
@@ -108,27 +108,11 @@ HaveSpan:
 		MHeap_FreeLocked(h, t);
 	}
 
-	// If span is being used for small objects, cache size class.
-	// No matter what, cache span info, because gc needs to be
+	// Record span info, because gc needs to be
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
 	for(n=0; n<npage; n++)
 		MHeapMap_Set(&h->map, s->start+n, s);
-	if(sizeclass == 0) {
-		uintptr tmp;
-
-		// If there are entries for this span, invalidate them,
-		// but don't blow out cache entries about other spans.
-		for(n=0; n<npage; n++)
-			if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
-				MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
-	} else {
-		// Save cache entries for this span.
-		// If there's a size class, there aren't that many pages.
-		for(n=0; n<npage; n++)
-			MHeapMapCache_SET(&h->mapcache, s->start+n, sizeclass);
-	}
-
 	return s;
 }
 
@@ -39,38 +39,3 @@ MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
 void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
 
-// Much of the time, free(v) needs to know only the size class for v,
-// not which span it came from.  The MHeapMap finds the size class
-// by looking up the span.
-//
-// An MHeapMapCache is a simple direct-mapped cache translating
-// page numbers to size classes.  It avoids the expensive MHeapMap
-// lookup for hot pages.
-//
-// The cache entries are 32 bits, with the page number in the low part
-// and the value at the top.
-//
-// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
-// we can use a 16-bit cache entry by not storing the redundant 12 bits
-// of the key that are used as the entry index.  For now, keep it simple.
-enum
-{
-	MHeapMapCache_HashBits = 12
-};
-
-struct MHeapMapCache
-{
-	uint32 array[1<<MHeapMapCache_HashBits];
-};
-
-// All macros for speed (sorry).
-#define HMASK ((1<<MHeapMapCache_HashBits)-1)
-#define KBITS MHeapMap_TotalBits
-#define KMASK ((1LL<<KBITS)-1)
-
-#define MHeapMapCache_SET(cache, key, value) \
-	((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
-
-#define MHeapMapCache_GET(cache, key, tmp) \
-	(tmp = (cache)->array[(key) & HMASK], \
-	 (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)
@@ -58,39 +58,3 @@ MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
 void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
 
-// Much of the time, free(v) needs to know only the size class for v,
-// not which span it came from.  The MHeapMap finds the size class
-// by looking up the span.
-//
-// An MHeapMapCache is a simple direct-mapped cache translating
-// page numbers to size classes.  It avoids the expensive MHeapMap
-// lookup for hot pages.
-//
-// The cache entries are 64 bits, with the page number in the low part
-// and the value at the top.
-//
-// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
-// we can use a 16-bit cache entry by not storing the redundant 12 bits
-// of the key that are used as the entry index.  Here in 64-bit land,
-// that trick won't work unless the hash table has 2^28 entries.
-enum
-{
-	MHeapMapCache_HashBits = 12
-};
-
-struct MHeapMapCache
-{
-	uintptr array[1<<MHeapMapCache_HashBits];
-};
-
-// All macros for speed (sorry).
-#define HMASK ((1<<MHeapMapCache_HashBits)-1)
-#define KBITS MHeapMap_TotalBits
-#define KMASK ((1LL<<KBITS)-1)
-
-#define MHeapMapCache_SET(cache, key, value) \
-	((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
-
-#define MHeapMapCache_GET(cache, key, tmp) \
-	(tmp = (cache)->array[(key) & HMASK], \
-	 (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)