diff --git a/src/pkg/runtime/Makefile b/src/pkg/runtime/Makefile
index 134d51ac026..ab761b0d62f 100644
--- a/src/pkg/runtime/Makefile
+++ b/src/pkg/runtime/Makefile
@@ -30,7 +30,6 @@ GOFILES=\
 	hashmap_defs.go\
 	iface_defs.go\
 	malloc_defs.go\
-	mheapmap$(SIZE)_defs.go\
 	runtime_defs.go\
 	$(GOOS)/runtime_defs.go\
@@ -70,7 +69,6 @@ OFILES=\
 	mfixalloc.$O\
 	mgc0.$O\
 	mheap.$O\
-	mheapmap$(SIZE).$O\
 	mprof.$O\
 	msize.$O\
 	print.$O\
diff --git a/src/pkg/runtime/amd64/traceback.c b/src/pkg/runtime/amd64/traceback.c
index 3ea80a66190..86e96f3488a 100644
--- a/src/pkg/runtime/amd64/traceback.c
+++ b/src/pkg/runtime/amd64/traceback.c
@@ -60,7 +60,7 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m)
 		// The 0x48 byte is only on amd64.
 		p = (byte*)pc;
 		// We check p < p+8 to avoid wrapping and faulting if we lose track.
-		if(runtime·mheap.min < p && p < p+8 && p+8 < runtime·mheap.max &&	// pointer in allocated memory
+		if(runtime·mheap.arena_start < p && p < p+8 && p+8 < runtime·mheap.arena_used &&	// pointer in allocated memory
 		   (sizeof(uintptr) != 8 || *p++ == 0x48) &&	// skip 0x48 byte on amd64
 		   p[0] == 0x81 && p[1] == 0xc4 && p[6] == 0xc3) {
 			sp += *(uint32*)(p+2);
@@ -154,7 +154,7 @@ isclosureentry(uintptr pc)
 	int32 i, siz;
 
 	p = (byte*)pc;
-	if(p < runtime·mheap.min || p+32 > runtime·mheap.max)
+	if(p < runtime·mheap.arena_start || p+32 > runtime·mheap.arena_used)
 		return 0;
 
 	// SUBQ $siz, SP
diff --git a/src/pkg/runtime/darwin/386/sys.s b/src/pkg/runtime/darwin/386/sys.s
index 79bbfb68bf4..7961e369c3a 100644
--- a/src/pkg/runtime/darwin/386/sys.s
+++ b/src/pkg/runtime/darwin/386/sys.s
@@ -138,7 +138,7 @@ TEXT runtime·bsdthread_create(SB),7,$32
 	MOVL	$0x1000000, 20(SP)	// flags = PTHREAD_START_CUSTOM
 	INT	$0x80
 	JAE	3(PC)
-	MOVL	$-1, AX
+	NEGL	AX
 	RET
 	MOVL	$0, AX
 	RET
diff --git a/src/pkg/runtime/darwin/amd64/sys.s b/src/pkg/runtime/darwin/amd64/sys.s
index 05dbc7b93b6..bc970156a32 100644
--- a/src/pkg/runtime/darwin/amd64/sys.s
+++ b/src/pkg/runtime/darwin/amd64/sys.s
@@ -141,7 +141,7 @@ TEXT runtime·bsdthread_create(SB),7,$0
 	MOVQ	$(0x2000000+360), AX	// bsdthread_create
 	SYSCALL
 	JCC 3(PC)
-	MOVL	$-1, AX
+	NEGL	AX
 	RET
 	MOVL	$0, AX
 	RET
diff --git a/src/pkg/runtime/darwin/mem.c b/src/pkg/runtime/darwin/mem.c
index 7fb2c280786..cbae187180d 100644
--- a/src/pkg/runtime/darwin/mem.c
+++ b/src/pkg/runtime/darwin/mem.c
@@ -10,10 +10,8 @@ runtime·SysAlloc(uintptr n)
 	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
-	if(v < (void*)4096) {
-		runtime·printf("mmap: errno=%p\n", v);
-		runtime·throw("mmap");
-	}
+	if(v < (void*)4096)
+		return nil;
 	return v;
 }
 
@@ -32,8 +30,19 @@ runtime·SysFree(void *v, uintptr n)
 	runtime·munmap(v, n);
 }
 
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+	return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
 void
-runtime·SysMemInit(void)
+runtime·SysMap(void *v, uintptr n)
 {
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
diff --git a/src/pkg/runtime/darwin/thread.c b/src/pkg/runtime/darwin/thread.c
index d69c624128d..57e813109cc 100644
--- a/src/pkg/runtime/darwin/thread.c
+++ b/src/pkg/runtime/darwin/thread.c
@@ -157,13 +157,17 @@ runtime·goenvs(void)
 void
 runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
 {
+	int32 errno;
+
 	m->tls[0] = m->id;	// so 386 asm can find it
 	if(0){
 		runtime·printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
 			stk, m, g, fn, m->id, m->tls[0], &m);
 	}
-	if(runtime·bsdthread_create(stk, m, g, fn) < 0)
-		runtime·throw("cannot create new OS thread");
+	if((errno = runtime·bsdthread_create(stk, m, g, fn)) < 0) {
+		runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -errno);
+		runtime·throw("runtime.newosproc");
+	}
 }
 
 // Called to initialize a new m (including the bootstrap m).
diff --git a/src/pkg/runtime/debug.go b/src/pkg/runtime/debug.go
index cf30374f098..d09db1be6ae 100644
--- a/src/pkg/runtime/debug.go
+++ b/src/pkg/runtime/debug.go
@@ -57,7 +57,6 @@ type MemStatsType struct {
 	MSpanSys    uint64
 	MCacheInuse uint64 // mcache structures
 	MCacheSys   uint64
-	MHeapMapSys uint64 // heap map
 	BuckHashSys uint64 // profiling bucket hash table
 
 	// Garbage collector statistics.
diff --git a/src/pkg/runtime/freebsd/mem.c b/src/pkg/runtime/freebsd/mem.c
index 7fb2c280786..cbae187180d 100644
--- a/src/pkg/runtime/freebsd/mem.c
+++ b/src/pkg/runtime/freebsd/mem.c
@@ -10,10 +10,8 @@ runtime·SysAlloc(uintptr n)
 	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
-	if(v < (void*)4096) {
-		runtime·printf("mmap: errno=%p\n", v);
-		runtime·throw("mmap");
-	}
+	if(v < (void*)4096)
+		return nil;
 	return v;
 }
 
@@ -32,8 +30,19 @@ runtime·SysFree(void *v, uintptr n)
 	runtime·munmap(v, n);
 }
 
+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+	return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
 void
-runtime·SysMemInit(void)
+runtime·SysMap(void *v, uintptr n)
 {
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
diff --git a/src/pkg/runtime/linux/mem.c b/src/pkg/runtime/linux/mem.c
index e750f97ea23..3a83e7394b0 100644
--- a/src/pkg/runtime/linux/mem.c
+++ b/src/pkg/runtime/linux/mem.c
@@ -12,12 +12,11 @@ runtime·SysAlloc(uintptr n)
 	p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(p < (void*)4096) {
 		if(p == (void*)EACCES) {
-			runtime·printf("mmap: access denied\n");
-			runtime·printf("If you're running SELinux, enable execmem for this process.\n");
+			runtime·printf("runtime: mmap: access denied\n");
+			runtime·printf("if you're running SELinux, enable execmem for this process.\n");
 			runtime·exit(2);
 		}
-		runtime·printf("mmap: errno=%p\n", p);
-		runtime·throw("mmap");
+		return nil;
 	}
 	return p;
 }
@@ -37,7 +36,19 @@ runtime·SysFree(void *v, uintptr n)
 	runtime·munmap(v, n);
 }
 
-void
-runtime·SysMemInit(void)
+void*
+runtime·SysReserve(void *v, uintptr n)
 {
+	return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index a3adca358d5..cc28b943df5 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -175,7 +175,7 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
 	MSpan *s;
 
 	mstats.nlookup++;
-	s = runtime·MHeap_LookupMaybe(&runtime·mheap, (uintptr)v>>PageShift);
+	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
 	if(sp)
 		*sp = s;
 	if(s == nil) {
@@ -249,8 +249,45 @@ int32 runtime·sizeof_C_MStats = sizeof(MStats);
 void
 runtime·mallocinit(void)
 {
-	runtime·SysMemInit();
+	byte *p;
+	uintptr arena_size;
+
 	runtime·InitSizes();
+
+	if(sizeof(void*) == 8) {
+		// On a 64-bit machine, allocate from a single contiguous reservation.
+		// 16 GB should be big enough for now.
+		//
+		// The code will work with the reservation at any address, but ask
+		// SysReserve to use 0x000000f800000000 if possible.
+		// Allocating a 16 GB region takes away 36 bits, and the amd64
+		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
+		// in the middle of 0x00f8 for us to choose.  Choosing 0x00f8 means
+		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
+		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
+		// they are otherwise as far from ff (likely a common byte) as possible.
+		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
+		// is not a common ASCII code point either.  Using 0x11f8 instead
+		// caused out of memory errors on OS X during thread allocations.
+		// These choices are both for debuggability and to reduce the
+		// odds of the conservative garbage collector not collecting memory
+		// because some non-pointer block of memory had a bit pattern
+		// that matched a memory address.
+		arena_size = 16LL<<30;
+		p = runtime·SysReserve((void*)(0x00f8ULL<<32), arena_size);
+		if(p == nil)
+			runtime·throw("runtime: cannot reserve arena virtual address space");
+		runtime·mheap.arena_start = p;
+		runtime·mheap.arena_used = p;
+		runtime·mheap.arena_end = p + arena_size;
+	} else {
+		// On a 32-bit machine, we'll take what we can get for each allocation
+		// and maintain arena_start and arena_end as min, max we've seen.
+		runtime·mheap.arena_start = (byte*)0xffffffff;
+		runtime·mheap.arena_end = 0;
+	}
+
+	// Initialize the rest of the allocator.
 	runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
 	m->mcache = runtime·allocmcache();
 
@@ -258,6 +295,32 @@ runtime·mallocinit(void)
 	runtime·free(runtime·malloc(1));
 }
 
+void*
+runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
+{
+	byte *p;
+
+	if(sizeof(void*) == 8) {
+		// Keep taking from our reservation.
+		if(h->arena_end - h->arena_used < n)
+			return nil;
+		p = h->arena_used;
+		runtime·SysMap(p, n);
+		h->arena_used += n;
+		return p;
+	} else {
+		// Take what we can get from the OS.
+		p = runtime·SysAlloc(n);
+		if(p == nil)
+			return nil;
+		if(p+n > h->arena_used)
+			h->arena_used = p+n;
+		if(p > h->arena_end)
+			h->arena_end = p;
+		return p;
+	}
+}
+
 // Runtime stubs.
 
 void*
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 7e750b9170d..e2472e8d238 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -19,7 +19,6 @@
 //	used to manage storage used by the allocator.
 //	MHeap: the malloc heap, managed at page (4096-byte) granularity.
 //	MSpan: a run of pages managed by the MHeap.
-//	MHeapMap: a mapping from page IDs to MSpans.
 //	MCentral: a shared free list for a given size class.
 //	MCache: a per-thread (in Go, per-M) cache for small objects.
 //	MStats: allocation statistics.
@@ -84,7 +83,6 @@ typedef struct FixAlloc	FixAlloc;
 typedef struct MCentral	MCentral;
 typedef struct MHeap	MHeap;
-typedef struct MHeapMap	MHeapMap;
 typedef struct MSpan	MSpan;
 typedef struct MStats	MStats;
 typedef struct MLink	MLink;
@@ -108,13 +106,16 @@ enum
 	MaxMCacheSize = 2<<20,	// Maximum bytes in one MCache
 	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
 	HeapAllocChunk = 1<<20,	// Chunk size for heap growth
-};
 
+	// Number of bits in page to span calculations (4k pages).
+	// On 64-bit, we limit the arena to 16G, so 22 bits suffices.
+	// On 32-bit, we don't bother limiting anything: 20 bits for 4G.
 #ifdef _64BIT
-#include "mheapmap64.h"
+	MHeapMap_Bits = 22,
 #else
-#include "mheapmap32.h"
+	MHeapMap_Bits = 20,
 #endif
+};
 
 // A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
 struct MLink
@@ -124,7 +125,8 @@ struct MLink
 
 // SysAlloc obtains a large chunk of zeroed memory from the
 // operating system, typically on the order of a hundred kilobytes
-// or a megabyte.
+// or a megabyte.  If the pointer argument is non-nil, the caller
+// wants a mapping there or nowhere.
 //
 // SysUnused notifies the operating system that the contents
 // of the memory region are no longer needed and can be reused
@@ -134,11 +136,19 @@ struct MLink
 // SysFree returns it unconditionally; this is only used if
 // an out-of-memory error has been detected midway through
 // an allocation.  It is okay if SysFree is a no-op.
+//
+// SysReserve reserves address space without allocating memory.
+// If the pointer passed to it is non-nil, the caller wants the
+// reservation there, but SysReserve can still choose another
+// location if that one is unavailable.
+//
+// SysMap maps previously reserved address space for use.
 
 void*	runtime·SysAlloc(uintptr nbytes);
 void	runtime·SysFree(void *v, uintptr nbytes);
 void	runtime·SysUnused(void *v, uintptr nbytes);
-void	runtime·SysMemInit(void);
+void	runtime·SysMap(void *v, uintptr nbytes);
+void*	runtime·SysReserve(void *v, uintptr nbytes);
 
 // FixAlloc is a simple free-list allocator for fixed size objects.
 // Malloc uses a FixAlloc wrapped around SysAlloc to manage its
@@ -194,7 +204,6 @@ struct MStats
 	uint64	mspan_sys;
 	uint64	mcache_inuse;	// MCache structures
 	uint64	mcache_sys;
-	uint64	heapmap_sys;	// heap map
 	uint64	buckhash_sys;	// profiling bucket hash table
 
 	// Statistics about garbage collector.
@@ -323,11 +332,13 @@ struct MHeap
 	MSpan *allspans;
 
 	// span lookup
-	MHeapMap map;
+	MSpan *map[1<<MHeapMap_Bits];
 
 	// range of addresses we might see in the heap
-	byte *min;
-	byte *max;
+	byte *arena_start;
+	byte *arena_used;
+	byte *arena_end;
 
 	// central free lists for small size classes.
@@ ... @@
 MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
 void	runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
-MSpan*	runtime·MHeap_Lookup(MHeap *h, PageID p);
-MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, PageID p);
+MSpan*	runtime·MHeap_Lookup(MHeap *h, void *v);
+MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, void *v);
+void*	runtime·MHeap_SysAlloc(MHeap *h, uintptr n);
diff --git a/src/pkg/runtime/mcentral.c b/src/pkg/runtime/mcentral.c
--- a/src/pkg/runtime/mcentral.c
+++ b/src/pkg/runtime/mcentral.c
@@ ... @@ MCentral_Free(MCentral *c, void *v)
-	page = (uintptr)v >> PageShift;
-	s = runtime·MHeap_Lookup(&runtime·mheap, page);
+	s = runtime·MHeap_Lookup(&runtime·mheap, v);
 	if(s == nil || s->ref == 0)
 		runtime·throw("invalid free");
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 4eace9f8312..af1c721e8b2 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -76,7 +76,7 @@ scanblock(byte *b, int64 n)
 		obj = vp[i];
 		if(obj == nil)
 			continue;
-		if(runtime·mheap.min <= (byte*)obj && (byte*)obj < runtime·mheap.max) {
+		if(runtime·mheap.arena_start <= (byte*)obj && (byte*)obj < runtime·mheap.arena_end) {
 			if(runtime·mlookup(obj, &obj, &size, nil, &refp)) {
 				ref = *refp;
 				switch(ref & ~RefFlags) {
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index 4bb7f14e3a8..0c9ac0a0968 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -41,7 +41,6 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
 
 	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
 	runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
-	runtime·MHeapMap_Init(&h->map, alloc);
 	// h->mapcache needs no init
 	for(i=0; i<nelem(h->free); i++)
 		runtime·MSpanList_Init(&h->free[i]);
@@ -79,6 +78,7 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
 {
 	uintptr n;
 	MSpan *s, *t;
+	PageID p;
 
 	// Try in fixed-size lists up to max.
 	for(n=npage; n < nelem(h->free); n++) {
@@ -112,18 +112,29 @@ HaveSpan:
 		mstats.mspan_sys = h->spanalloc.sys;
 		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
 		s->npages = npage;
-		runtime·MHeapMap_Set(&h->map, t->start - 1, s);
-		runtime·MHeapMap_Set(&h->map, t->start, t);
-		runtime·MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+		p = t->start;
+		if(sizeof(void*) == 8)
+			p -= ((uintptr)h->arena_start>>PageShift);
+		if(p > 0)
+			h->map[p-1] = s;
+		h->map[p] = t;
+		h->map[p+t->npages-1] = t;
+		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
 	}
 
+	if(*(uintptr*)(s->start<<PageShift) != 0)
+		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+
 	// Record span info, because gc needs to be
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= ((uintptr)h->arena_start>>PageShift);
 	for(n=0; n<s->npages; n++)
-		runtime·MHeapMap_Set(&h->map, s->start+n, s);
+		h->map[p+n] = s;
 	return s;
 }
@@ -161,6 +172,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	uintptr ask;
 	void *v;
 	MSpan *s;
+	PageID p;
 
 	// Ask for a big chunk, to reduce the number of mappings
 	// the operating system needs to track; also amortizes
@@ -171,29 +183,21 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	if(ask < HeapAllocChunk)
 		ask = HeapAllocChunk;
 
-	v = runtime·SysAlloc(ask);
+	v = runtime·MHeap_SysAlloc(h, ask);
 	if(v == nil) {
 		if(ask > (npage<<PageShift)) {
 			ask = npage<<PageShift;
-			v = runtime·SysAlloc(ask);
+			v = runtime·MHeap_SysAlloc(h, ask);
 		}
 		if(v == nil)
 			return false;
 	}
 	mstats.heap_sys += ask;
 
-	if((byte*)v < h->min || h->min == nil)
-		h->min = v;
-	if((byte*)v+ask > h->max)
-		h->max = (byte*)v+ask;
-
-	// NOTE(rsc): In tcmalloc, if we've accumulated enough
-	// system allocations, the heap map gets entirely allocated
-	// in 32-bit mode.  (In 64-bit mode that's not practical.)
-	if(!runtime·MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
-		runtime·SysFree(v, ask);
-		return false;
-	}
+	if((byte*)v < h->arena_start || h->arena_start == nil)
+		h->arena_start = v;
+	if((byte*)v+ask > h->arena_end)
+		h->arena_end = (byte*)v+ask;
 
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
@@ -201,35 +205,50 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	mstats.mspan_inuse = h->spanalloc.inuse;
 	mstats.mspan_sys = h->spanalloc.sys;
 	runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
-	runtime·MHeapMap_Set(&h->map, s->start, s);
-	runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= ((uintptr)h->arena_start>>PageShift);
+	h->map[p] = s;
+	h->map[p + s->npages - 1] = s;
 	s->state = MSpanInUse;
 	MHeap_FreeLocked(h, s);
 	return true;
 }
 
-// Look up the span at the given page number.
-// Page number is guaranteed to be in map
+// Look up the span at the given address.
+// Address is guaranteed to be in map
 // and is guaranteed to be start or end of span.
 MSpan*
-runtime·MHeap_Lookup(MHeap *h, PageID p)
+runtime·MHeap_Lookup(MHeap *h, void *v)
 {
-	return runtime·MHeapMap_Get(&h->map, p);
+	uintptr p;
+
+	p = (uintptr)v;
+	if(sizeof(void*) == 8)
+		p -= (uintptr)h->arena_start;
+	return h->map[p >> PageShift];
 }
 
-// Look up the span at the given page number.
-// Page number is *not* guaranteed to be in map
+// Look up the span at the given address.
+// Address is *not* guaranteed to be in map
 // and may be anywhere in the span.
 // Map entries for the middle of a span are only
 // valid for allocated spans.  Free spans may have
 // other garbage in their middles, so we have to
 // check for that.
 MSpan*
-runtime·MHeap_LookupMaybe(MHeap *h, PageID p)
+runtime·MHeap_LookupMaybe(MHeap *h, void *v)
 {
 	MSpan *s;
+	PageID p, q;
 
-	s = runtime·MHeapMap_GetMaybe(&h->map, p);
+	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
+		return nil;
+	p = (uintptr)v>>PageShift;
+	q = p;
+	if(sizeof(void*) == 8)
+		q -= (uintptr)h->arena_start >> PageShift;
+	s = h->map[q];
 	if(s == nil || p < s->start || p - s->start >= s->npages)
 		return nil;
 	if(s->state != MSpanInUse)
@@ -258,7 +277,9 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 static void
 MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
+	uintptr *sp, *tp;
 	MSpan *t;
+	PageID p;
 
 	if(s->state != MSpanInUse || s->ref != 0) {
 		runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
@@ -266,21 +287,30 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	}
 	s->state = MSpanFree;
 	runtime·MSpanList_Remove(s);
+	sp = (uintptr*)(s->start<<PageShift);
 
 	// Coalesce with earlier, later spans.
-	if((t = runtime·MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= (uintptr)h->arena_start >> PageShift;
+	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
+		tp = (uintptr*)(t->start<<PageShift);
+		*tp |= *sp;	// propagate "needs zeroing" mark
 		s->start = t->start;
 		s->npages += t->npages;
-		runtime·MHeapMap_Set(&h->map, s->start, s);
+		p -= t->npages;
+		h->map[p] = s;
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime·FixAlloc_Free(&h->spanalloc, t);
 		mstats.mspan_inuse = h->spanalloc.inuse;
 		mstats.mspan_sys = h->spanalloc.sys;
 	}
-	if((t = runtime·MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
+		tp = (uintptr*)(t->start<<PageShift);
+		*tp |= *sp;	// propagate "needs zeroing" mark
 		s->npages += t->npages;
-		runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+		h->map[p + s->npages - 1] = s;
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime·FixAlloc_Free(&h->spanalloc, t);
diff --git a/src/pkg/runtime/mheapmap32.c b/src/pkg/runtime/mheapmap32.c
deleted file mode 100644
index 323f8b87a36..00000000000
--- a/src/pkg/runtime/mheapmap32.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2009 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Heap map, 32-bit version
-// See malloc.h and mheap.c for overview.
-
-#include "runtime.h"
-#include "malloc.h"
-
-// 3-level radix tree mapping page ids to Span*.
-void
-runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
-{
-	m->allocator = allocator;
-}
-
-MSpan*
-runtime·MHeapMap_Get(MHeapMap *m, PageID k)
-{
-	int32 i1, i2;
-
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	return m->p[i1]->s[i2];
-}
-
-MSpan*
-runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k)
-{
-	int32 i1, i2;
-	MHeapMapNode2 *p2;
-
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	p2 = m->p[i1];
-	if(p2 == nil)
-		return nil;
-	return p2->s[i2];
-}
-
-void
-runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
-{
-	int32 i1, i2;
-
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Set");
-
-	m->p[i1]->s[i2] = s;
-}
-
-// Allocate the storage required for entries [k, k+1, ..., k+len-1]
-// so that Get and Set calls need not check for nil pointers.
-bool
-runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
-{
-	uintptr end;
-	int32 i1;
-	MHeapMapNode2 *p2;
-
-	end = k+len;
-	while(k < end) {
-		if((k >> MHeapMap_TotalBits) != 0)
-			return false;
-		i1 = (k >> MHeapMap_Level2Bits) & MHeapMap_Level1Mask;
-
-		// first-level pointer
-		if(m->p[i1] == nil) {
-			p2 = m->allocator(sizeof *p2);
-			if(p2 == nil)
-				return false;
-			mstats.heapmap_sys += sizeof *p2;
-			m->p[i1] = p2;
-		}
-
-		// advance key past this leaf node
-		k = ((k >> MHeapMap_Level2Bits) + 1) << MHeapMap_Level2Bits;
-	}
-	return true;
-}
diff --git a/src/pkg/runtime/mheapmap32.h b/src/pkg/runtime/mheapmap32.h
deleted file mode 100644
index 29e6190717a..00000000000
--- a/src/pkg/runtime/mheapmap32.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2009 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Free(v) must be able to determine the MSpan containing v.
-// The MHeapMap is a 2-level radix tree mapping page numbers to MSpans.
-
-typedef struct MHeapMapNode2 MHeapMapNode2;
-
-enum
-{
-	// 32 bit address - 12 bit page size = 20 bits to map
-	MHeapMap_Level1Bits = 10,
-	MHeapMap_Level2Bits = 10,
-
-	MHeapMap_TotalBits =
-		MHeapMap_Level1Bits +
-		MHeapMap_Level2Bits,
-
-	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
-	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
-};
-
-struct MHeapMap
-{
-	void *(*allocator)(uintptr);
-	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
-};
-
-struct MHeapMapNode2
-{
-	MSpan *s[1<<MHeapMap_Level2Bits];
-};
-
-void	runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool	runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len);
-MSpan*	runtime·MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan*	runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void	runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s);
diff --git a/src/pkg/runtime/mheapmap64.c b/src/pkg/runtime/mheapmap64.c
deleted file mode 100644
--- a/src/pkg/runtime/mheapmap64.c
+++ /dev/null
@@ ... @@
-// Copyright 2009 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Heap map, 64-bit version
-// See malloc.h and mheap.c for overview.
-
-#include "runtime.h"
-#include "malloc.h"
-
-// 3-level radix tree mapping page ids to Span*.
-void
-runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
-{
-	m->allocator = allocator;
-}
-
-MSpan*
-runtime·MHeapMap_Get(MHeapMap *m, PageID k)
-{
-	int32 i1, i2, i3;
-
-	i3 = k & MHeapMap_Level3Mask;
-	k >>= MHeapMap_Level3Bits;
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	return m->p[i1]->p[i2]->s[i3];
-}
-
-MSpan*
-runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k)
-{
-	int32 i1, i2, i3;
-	MHeapMapNode2 *p2;
-	MHeapMapNode3 *p3;
-
-	i3 = k & MHeapMap_Level3Mask;
-	k >>= MHeapMap_Level3Bits;
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	p2 = m->p[i1];
-	if(p2 == nil)
-		return nil;
-	p3 = p2->p[i2];
-	if(p3 == nil)
-		return nil;
-	return p3->s[i3];
-}
-
-void
-runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
-{
-	int32 i1, i2, i3;
-
-	i3 = k & MHeapMap_Level3Mask;
-	k >>= MHeapMap_Level3Bits;
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Set");
-
-	m->p[i1]->p[i2]->s[i3] = s;
-}
-
-// Allocate the storage required for entries [k, k+1, ..., k+len-1]
-// so that Get and Set calls need not check for nil pointers.
-bool
-runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
-{
-	uintptr end;
-	int32 i1, i2;
-	MHeapMapNode2 *p2;
-	MHeapMapNode3 *p3;
-
-	end = k+len;
-	while(k < end) {
-		if((k >> MHeapMap_TotalBits) != 0)
-			return false;
-		i2 = (k >> MHeapMap_Level3Bits) & MHeapMap_Level2Mask;
-		i1 = (k >> (MHeapMap_Level3Bits + MHeapMap_Level2Bits)) & MHeapMap_Level1Mask;
-
-		// first-level pointer
-		if((p2 = m->p[i1]) == nil) {
-			p2 = m->allocator(sizeof *p2);
-			if(p2 == nil)
-				return false;
-			mstats.heapmap_sys += sizeof *p2;
-			m->p[i1] = p2;
-		}
-
-		// second-level pointer
-		if(p2->p[i2] == nil) {
-			p3 = m->allocator(sizeof *p3);
-			if(p3 == nil)
-				return false;
-			mstats.heapmap_sys += sizeof *p3;
-			p2->p[i2] = p3;
-		}
-
-		// advance key past this leaf node
-		k = ((k >> MHeapMap_Level3Bits) + 1) << MHeapMap_Level3Bits;
-	}
-	return true;
-}
diff --git a/src/pkg/runtime/mheapmap64.h b/src/pkg/runtime/mheapmap64.h
deleted file mode 100644
index a9934d2b15b..00000000000
--- a/src/pkg/runtime/mheapmap64.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2009 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Free(v) must be able to determine the MSpan containing v.
-// The MHeapMap is a 3-level radix tree mapping page numbers to MSpans.
-//
-// NOTE(rsc): On a 32-bit platform (= 20-bit page numbers),
-// we can swap in a 2-level radix tree.
-//
-// NOTE(rsc): We use a 3-level tree because tcmalloc does, but
-// having only three levels requires approximately 1 MB per node
-// in the tree, making the minimum map footprint 3 MB.
-// Using a 4-level tree would cut the minimum footprint to 256 kB.
-// On the other hand, it's just virtual address space: most of
-// the memory is never going to be touched, thus never paged in.
-
-typedef struct MHeapMapNode2 MHeapMapNode2;
-typedef struct MHeapMapNode3 MHeapMapNode3;
-
-enum
-{
-	// 64 bit address - 12 bit page size = 52 bits to map
-	MHeapMap_Level1Bits = 18,
-	MHeapMap_Level2Bits = 18,
-	MHeapMap_Level3Bits = 16,
-
-	MHeapMap_TotalBits =
-		MHeapMap_Level1Bits +
-		MHeapMap_Level2Bits +
-		MHeapMap_Level3Bits,
-
-	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
-	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
-	MHeapMap_Level3Mask = (1<<MHeapMap_Level3Bits) - 1,
-};
-
-struct MHeapMap
-{
-	void *(*allocator)(uintptr);
-	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
-};
-
-struct MHeapMapNode2
-{
-	MHeapMapNode3 *p[1<<MHeapMap_Level2Bits];
-};
-
-struct MHeapMapNode3
-{
-	MSpan *s[1<<MHeapMap_Level3Bits];
-};
-
-void	runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool	runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len);
-MSpan*	runtime·MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan*	runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void	runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s);
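
The reserve-then-commit pattern this patch builds out of SysReserve and SysMap, and the flat page-indexed span map it enables, can be sketched as an ordinary user-space C program. This is a minimal sketch against POSIX mmap, not runtime code: the arena_* names, the page_index helper, and the 1 GB / 1 MB sizes are illustrative assumptions, and error handling is reduced to NULL returns.

/*
 * Sketch of the arena scheme: reserve a large contiguous PROT_NONE
 * region once, then commit pages over it with MAP_FIXED as the heap
 * grows.  Illustrative only; not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

enum { PageShift = 12 };	/* 4096-byte pages, as in the patch */

static uint8_t *arena_start, *arena_used, *arena_end;

/* Reserve address space without committing memory (cf. runtime·SysReserve). */
static int
arena_reserve(void *hint, uintptr_t size)
{
	void *p;

	p = mmap(hint, size, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(p == MAP_FAILED)
		return -1;
	arena_start = arena_used = p;
	arena_end = arena_start + size;
	return 0;
}

/* Commit the next n bytes of the reservation
 * (cf. runtime·MHeap_SysAlloc calling runtime·SysMap). */
static void*
arena_alloc(uintptr_t n)
{
	uint8_t *p;

	if((uintptr_t)(arena_end - arena_used) < n)
		return NULL;
	p = arena_used;
	if(mmap(p, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0) != (void*)p)
		return NULL;
	arena_used += n;
	return p;
}

/* With a contiguous arena, span lookup becomes a flat array indexed by
 * page number relative to arena_start, as in the new MHeap_Lookup:
 * index = (addr - arena_start) >> PageShift. */
static uintptr_t
page_index(void *v)
{
	return (uintptr_t)((uint8_t*)v - arena_start) >> PageShift;
}

int
main(void)
{
	void *p;

	/* 1 GB demo reservation; the 0x00f8... hint mirrors the patch,
	 * but the code works wherever the kernel places the mapping. */
	if(arena_reserve((void*)(0x00f8ULL<<32), 1ULL<<30) < 0)
		return 1;
	p = arena_alloc(1<<20);	/* commit 1 MB */
	if(p == NULL)
		return 1;
	printf("page index of %p = %lu\n", p, (unsigned long)page_index(p));
	return 0;
}

The design point the sketch makes concrete: once every heap page is guaranteed to lie inside one contiguous reservation, mapping an address to its span is a single array index after subtracting arena_start, which is why the patch can delete the 2- and 3-level mheapmap radix trees outright.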