runtime: simpler heap map, memory allocation
The old heap maps used a multilevel table, but that was overkill: there are only
1M entries on a 32-bit machine, and we can arrange to use a dense address range
on a 64-bit machine.

The heap map is in bss. The assumption is that if we don't touch the pages,
they won't be mapped in.

Also moved some duplicated memory allocation code out of the OS-specific files.

R=r
CC=golang-dev
https://golang.org/cl/4118042
commit 4608feb18b
parent 50f574515c
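The core of the change: instead of a radix tree keyed by page number, the span lookup becomes one flat array with one pointer per page, indexed by (address - arena_start) >> PageShift on 64-bit and by the absolute page number on 32-bit. A rough standard-C sketch of the idea (illustrative names, not the runtime's own declarations):

	#include <stdint.h>

	#define PAGE_SHIFT 12          /* 4 KB pages */
	#define MAP_BITS   22          /* 64-bit: 16 GB arena / 4 KB pages = 2^22 entries */

	typedef struct Span Span;

	static Span *span_map[1 << MAP_BITS];  /* lives in bss; the OS backs pages lazily */
	static uintptr_t arena_start;          /* base of the reserved arena */

	static Span*
	span_lookup(void *v)
	{
		uintptr_t page = ((uintptr_t)v - arena_start) >> PAGE_SHIFT;
		return span_map[page];
	}

Because the array sits in bss and untouched pages are never faulted in, the worst-case table (2^22 eight-byte pointers, 32 MB) costs only virtual address space until it is actually used.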
@@ -30,7 +30,6 @@ GOFILES=\
 	hashmap_defs.go\
 	iface_defs.go\
 	malloc_defs.go\
-	mheapmap$(SIZE)_defs.go\
 	runtime_defs.go\
 	$(GOOS)/runtime_defs.go\

@@ -70,7 +69,6 @@ OFILES=\
 	mfixalloc.$O\
 	mgc0.$O\
 	mheap.$O\
-	mheapmap$(SIZE).$O\
 	mprof.$O\
 	msize.$O\
 	print.$O\

@@ -60,7 +60,7 @@ gentraceback(byte *pc0, byte *sp, G *g, int32 skip, uintptr *pcbuf, int32 m)
 		// The 0x48 byte is only on amd64.
 		p = (byte*)pc;
 		// We check p < p+8 to avoid wrapping and faulting if we lose track.
-		if(runtime·mheap.min < p && p < p+8 && p+8 < runtime·mheap.max &&	// pointer in allocated memory
+		if(runtime·mheap.arena_start < p && p < p+8 && p+8 < runtime·mheap.arena_used &&	// pointer in allocated memory
 		   (sizeof(uintptr) != 8 || *p++ == 0x48) &&	// skip 0x48 byte on amd64
 		   p[0] == 0x81 && p[1] == 0xc4 && p[6] == 0xc3) {
 			sp += *(uint32*)(p+2);
@@ -154,7 +154,7 @@ isclosureentry(uintptr pc)
 	int32 i, siz;

 	p = (byte*)pc;
-	if(p < runtime·mheap.min || p+32 > runtime·mheap.max)
+	if(p < runtime·mheap.arena_start || p+32 > runtime·mheap.arena_used)
 		return 0;

 	// SUBQ $siz, SP
@@ -138,7 +138,7 @@ TEXT runtime·bsdthread_create(SB),7,$32
 	MOVL	$0x1000000, 20(SP)	// flags = PTHREAD_START_CUSTOM
 	INT	$0x80
 	JAE	3(PC)
-	MOVL	$-1, AX
+	NEGL	AX
 	RET
 	MOVL	$0, AX
 	RET
@@ -141,7 +141,7 @@ TEXT runtime·bsdthread_create(SB),7,$0
 	MOVQ	$(0x2000000+360), AX	// bsdthread_create
 	SYSCALL
 	JCC	3(PC)
-	MOVL	$-1, AX
+	NEGL	AX
 	RET
 	MOVL	$0, AX
 	RET
@@ -10,10 +10,8 @@ runtime·SysAlloc(uintptr n)

 	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
-	if(v < (void*)4096) {
-		runtime·printf("mmap: errno=%p\n", v);
-		runtime·throw("mmap");
-	}
+	if(v < (void*)4096)
+		return nil;
 	return v;
 }

@@ -32,8 +30,19 @@ runtime·SysFree(void *v, uintptr n)
 	runtime·munmap(v, n);
 }

+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+	return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
 void
-runtime·SysMemInit(void)
+runtime·SysMap(void *v, uintptr n)
 {
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
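The hunk above splits the old all-in-one SysAlloc into a reserve step and a commit step. A minimal standalone demo of the same mmap pattern (POSIX; MAP_ANON is the BSD/Darwin spelling, MAP_ANONYMOUS on Linux), not the runtime's code:

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void) {
		size_t reserve = (size_t)1 << 30;   /* reserve 1 GB of address space */
		char *base = mmap(NULL, reserve, PROT_NONE,
		                  MAP_ANON|MAP_PRIVATE, -1, 0);
		if (base == MAP_FAILED) { perror("reserve"); return 1; }

		size_t chunk = (size_t)1 << 20;     /* commit the first 1 MB of it */
		char *p = mmap(base, chunk, PROT_READ|PROT_WRITE,
		               MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
		if (p != base) { perror("commit"); return 1; }

		p[0] = 1;                           /* committed memory is usable */
		printf("reserved %zu bytes at %p, committed %zu\n",
		       reserve, (void*)base, chunk);
		return 0;
	}

PROT_NONE claims addresses without committing memory; the later MAP_FIXED mapping with real protections is what SysMap does for each arena chunk.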
@@ -157,13 +157,17 @@ runtime·goenvs(void)
 void
 runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
 {
+	int32 errno;
+
 	m->tls[0] = m->id;	// so 386 asm can find it
 	if(0){
 		runtime·printf("newosproc stk=%p m=%p g=%p fn=%p id=%d/%d ostk=%p\n",
 			stk, m, g, fn, m->id, m->tls[0], &m);
 	}
-	if(runtime·bsdthread_create(stk, m, g, fn) < 0)
-		runtime·throw("cannot create new OS thread");
+	if((errno = runtime·bsdthread_create(stk, m, g, fn)) < 0) {
+		runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -errno);
+		runtime·throw("runtime.newosproc");
+	}
 }

 // Called to initialize a new m (including the bootstrap m).
@@ -57,7 +57,6 @@ type MemStatsType struct {
 	MSpanSys    uint64
 	MCacheInuse uint64 // mcache structures
 	MCacheSys   uint64
-	MHeapMapSys uint64 // heap map
 	BuckHashSys uint64 // profiling bucket hash table

 	// Garbage collector statistics.
@@ -10,10 +10,8 @@ runtime·SysAlloc(uintptr n)

 	mstats.sys += n;
 	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
-	if(v < (void*)4096) {
-		runtime·printf("mmap: errno=%p\n", v);
-		runtime·throw("mmap");
-	}
+	if(v < (void*)4096)
+		return nil;
 	return v;
 }

@@ -32,8 +30,19 @@ runtime·SysFree(void *v, uintptr n)
 	runtime·munmap(v, n);
 }

+void*
+runtime·SysReserve(void *v, uintptr n)
+{
+	return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
 void
-runtime·SysMemInit(void)
+runtime·SysMap(void *v, uintptr n)
 {
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
@@ -12,12 +12,11 @@ runtime·SysAlloc(uintptr n)
 	p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, -1, 0);
 	if(p < (void*)4096) {
 		if(p == (void*)EACCES) {
-			runtime·printf("mmap: access denied\n");
-			runtime·printf("If you're running SELinux, enable execmem for this process.\n");
+			runtime·printf("runtime: mmap: access denied\n");
+			runtime·printf("if you're running SELinux, enable execmem for this process.\n");
 			runtime·exit(2);
 		}
-		runtime·printf("mmap: errno=%p\n", p);
-		runtime·throw("mmap");
+		return nil;
 	}
 	return p;
 }
@@ -37,7 +36,19 @@ runtime·SysFree(void *v, uintptr n)
 	runtime·munmap(v, n);
 }

-void
-runtime·SysMemInit(void)
+void*
+runtime·SysReserve(void *v, uintptr n)
 {
+	return runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+}
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
|
@ -175,7 +175,7 @@ runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
|
|||||||
MSpan *s;
|
MSpan *s;
|
||||||
|
|
||||||
mstats.nlookup++;
|
mstats.nlookup++;
|
||||||
s = runtime·MHeap_LookupMaybe(&runtime·mheap, (uintptr)v>>PageShift);
|
s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
|
||||||
if(sp)
|
if(sp)
|
||||||
*sp = s;
|
*sp = s;
|
||||||
if(s == nil) {
|
if(s == nil) {
|
||||||
@@ -249,8 +249,45 @@ int32 runtime·sizeof_C_MStats = sizeof(MStats);
 void
 runtime·mallocinit(void)
 {
-	runtime·SysMemInit();
+	byte *p;
+	uintptr arena_size;
+
 	runtime·InitSizes();

+	if(sizeof(void*) == 8) {
+		// On a 64-bit machine, allocate from a single contiguous reservation.
+		// 16 GB should be big enough for now.
+		//
+		// The code will work with the reservation at any address, but ask
+		// SysReserve to use 0x000000f800000000 if possible.
+		// Allocating a 16 GB region takes away 36 bits, and the amd64
+		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
+		// in the middle of 0x00f8 for us to choose.  Choosing 0x00f8 means
+		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
+		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
+		// they are otherwise as far from ff (likely a common byte) as possible.
+		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
+		// is not a common ASCII code point either.  Using 0x11f8 instead
+		// caused out of memory errors on OS X during thread allocations.
+		// These choices are both for debuggability and to reduce the
+		// odds of the conservative garbage collector not collecting memory
+		// because some non-pointer block of memory had a bit pattern
+		// that matched a memory address.
+		arena_size = 16LL<<30;
+		p = runtime·SysReserve((void*)(0x00f8ULL<<32), arena_size);
+		if(p == nil)
+			runtime·throw("runtime: cannot reserve arena virtual address space");
+		runtime·mheap.arena_start = p;
+		runtime·mheap.arena_used = p;
+		runtime·mheap.arena_end = p + arena_size;
+	} else {
+		// On a 32-bit machine, we'll take what we can get for each allocation
+		// and maintain arena_start and arena_end as min, max we've seen.
+		runtime·mheap.arena_start = (byte*)0xffffffff;
+		runtime·mheap.arena_end = 0;
+	}
+
+	// Initialize the rest of the allocator.
 	runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
 	m->mcache = runtime·allocmcache();

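A quick sanity check of the address arithmetic described in the comment above (standalone standard C, illustration only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void) {
		uint64_t start = 0x00f8ULL << 32;  /* 0x000000f800000000 */
		uint64_t size  = 16ULL << 30;      /* 16 GB = 2^34 bytes */
		uint64_t end   = start + size;     /* 0x000000fc00000000 */
		printf("arena %#llx .. %#llx\n",
		       (unsigned long long)start, (unsigned long long)end);
		/* bits 32..39 of any arena address are f8, f9, fa, or fb;
		   none of those byte values can appear in valid UTF-8 */
		return 0;
	}

16 GB is 2^34 = 0x4_00000000, so the arena runs from 0xf8_00000000 to 0xfc_00000000 and every address in it carries one of the four bytes the comment names.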
@@ -258,6 +295,32 @@ runtime·mallocinit(void)
 	runtime·free(runtime·malloc(1));
 }

+void*
+runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
+{
+	byte *p;
+
+	if(sizeof(void*) == 8) {
+		// Keep taking from our reservation.
+		if(h->arena_end - h->arena_used < n)
+			return nil;
+		p = h->arena_used;
+		runtime·SysMap(p, n);
+		h->arena_used += n;
+		return p;
+	} else {
+		// Take what we can get from the OS.
+		p = runtime·SysAlloc(n);
+		if(p == nil)
+			return nil;
+		if(p+n > h->arena_used)
+			h->arena_used = p+n;
+		if(p > h->arena_end)
+			h->arena_end = p;
+		return p;
+	}
+}
+
 // Runtime stubs.

 void*
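The 64-bit branch of MHeap_SysAlloc is a plain bump allocator over the reservation. A condensed sketch under that reading (the commit callback is a hypothetical stand-in for SysMap, not a runtime function):

	#include <stddef.h>

	typedef struct Arena {
		char *start, *used, *end;
	} Arena;

	/* commit() stands in for SysMap: back [p, p+n) with real memory */
	static void *arena_alloc(Arena *a, size_t n,
	                         void (*commit)(void *p, size_t n)) {
		if ((size_t)(a->end - a->used) < n)
			return 0;               /* reservation exhausted */
		char *p = a->used;
		commit(p, n);
		a->used += n;
		return p;
	}

Note that arena_used only ever grows here: freeing returns pages to the OS, not address space, which is acceptable against a 16 GB reservation.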
@@ -19,7 +19,6 @@
 //	used to manage storage used by the allocator.
 //	MHeap: the malloc heap, managed at page (4096-byte) granularity.
 //	MSpan: a run of pages managed by the MHeap.
-//	MHeapMap: a mapping from page IDs to MSpans.
 //	MCentral: a shared free list for a given size class.
 //	MCache: a per-thread (in Go, per-M) cache for small objects.
 //	MStats: allocation statistics.
@@ -84,7 +83,6 @@
 typedef struct FixAlloc	FixAlloc;
 typedef struct MCentral	MCentral;
 typedef struct MHeap	MHeap;
-typedef struct MHeapMap	MHeapMap;
 typedef struct MSpan	MSpan;
 typedef struct MStats	MStats;
 typedef struct MLink	MLink;
@@ -108,13 +106,16 @@ enum
 	MaxMCacheSize = 2<<20,	// Maximum bytes in one MCache
 	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
 	HeapAllocChunk = 1<<20,	// Chunk size for heap growth
-};

+	// Number of bits in page to span calculations (4k pages).
+	// On 64-bit, we limit the arena to 16G, so 22 bits suffices.
+	// On 32-bit, we don't bother limiting anything: 20 bits for 4G.
 #ifdef _64BIT
-#include "mheapmap64.h"
+	MHeapMap_Bits = 22,
 #else
-#include "mheapmap32.h"
+	MHeapMap_Bits = 20,
 #endif
+};

 // A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
 struct MLink
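The constants follow directly from the arena sizes: on 64-bit, log2(16 GB) - log2(4 KB) = 34 - 12 = 22 bits of page number, so the flat map holds 2^22 pointers (32 MB of bss); on 32-bit, 32 - 12 = 20 bits, for 2^20 entries (4 MB). Only the parts of the table that are actually touched get backed by physical memory, so the worst-case figure is virtual, not resident.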
@@ -124,7 +125,8 @@ struct MLink

 // SysAlloc obtains a large chunk of zeroed memory from the
 // operating system, typically on the order of a hundred kilobytes
-// or a megabyte.
+// or a megabyte.  If the pointer argument is non-nil, the caller
+// wants a mapping there or nowhere.
 //
 // SysUnused notifies the operating system that the contents
 // of the memory region are no longer needed and can be reused
@@ -134,11 +136,19 @@ struct MLink
 // SysFree returns it unconditionally; this is only used if
 // an out-of-memory error has been detected midway through
 // an allocation.  It is okay if SysFree is a no-op.
+//
+// SysReserve reserves address space without allocating memory.
+// If the pointer passed to it is non-nil, the caller wants the
+// reservation there, but SysReserve can still choose another
+// location if that one is unavailable.
+//
+// SysMap maps previously reserved address space for use.

 void*	runtime·SysAlloc(uintptr nbytes);
 void	runtime·SysFree(void *v, uintptr nbytes);
 void	runtime·SysUnused(void *v, uintptr nbytes);
-void	runtime·SysMemInit(void);
+void	runtime·SysMap(void *v, uintptr nbytes);
+void*	runtime·SysReserve(void *v, uintptr nbytes);

 // FixAlloc is a simple free-list allocator for fixed size objects.
 // Malloc uses a FixAlloc wrapped around SysAlloc to manages its
@@ -194,7 +204,6 @@ struct MStats
 	uint64	mspan_sys;
 	uint64	mcache_inuse;	// MCache structures
 	uint64	mcache_sys;
-	uint64	heapmap_sys;	// heap map
 	uint64	buckhash_sys;	// profiling bucket hash table

 	// Statistics about garbage collector.
@@ -323,11 +332,13 @@ struct MHeap
 	MSpan *allspans;

 	// span lookup
-	MHeapMap map;
+	MSpan *map[1<<MHeapMap_Bits];

 	// range of addresses we might see in the heap
-	byte *min;
-	byte *max;
+	byte *bitmap;
+	byte *arena_start;
+	byte *arena_used;
+	byte *arena_end;

 	// central free lists for small size classes.
 	// the union makes sure that the MCentrals are
@@ -346,18 +357,15 @@ extern MHeap runtime·mheap;
 void	runtime·MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
 MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
 void	runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
-MSpan*	runtime·MHeap_Lookup(MHeap *h, PageID p);
-MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, PageID p);
+MSpan*	runtime·MHeap_Lookup(MHeap *h, void *v);
+MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, void *v);
 void	runtime·MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
+void*	runtime·MHeap_SysAlloc(MHeap *h, uintptr n);

 void*	runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
 int32	runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
 void	runtime·gc(int32 force);

-void*	runtime·SysAlloc(uintptr);
-void	runtime·SysUnused(void*, uintptr);
-void	runtime·SysFree(void*, uintptr);
-
 enum
 {
 	RefcountOverhead = 4,	// one uint32 per object
@@ -88,7 +88,6 @@ type mHeap struct {
 	free        [maxMHeapList]mSpan
 	large       mSpan
 	allspans    *mSpan
-	map_        mHeapMap
 	min         *byte
 	max         *byte
 	closure_min *byte
@@ -118,8 +118,7 @@ MCentral_Free(MCentral *c, void *v)
 	int32 size;

 	// Find span for v.
-	page = (uintptr)v >> PageShift;
-	s = runtime·MHeap_Lookup(&runtime·mheap, page);
+	s = runtime·MHeap_Lookup(&runtime·mheap, v);
 	if(s == nil || s->ref == 0)
 		runtime·throw("invalid free");

@@ -76,7 +76,7 @@ scanblock(byte *b, int64 n)
 			obj = vp[i];
 			if(obj == nil)
 				continue;
-			if(runtime·mheap.min <= (byte*)obj && (byte*)obj < runtime·mheap.max) {
+			if(runtime·mheap.arena_start <= (byte*)obj && (byte*)obj < runtime·mheap.arena_end) {
 				if(runtime·mlookup(obj, &obj, &size, nil, &refp)) {
 					ref = *refp;
 					switch(ref & ~RefFlags) {
@@ -41,7 +41,6 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))

 	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
 	runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
-	runtime·MHeapMap_Init(&h->map, alloc);
 	// h->mapcache needs no init
 	for(i=0; i<nelem(h->free); i++)
 		runtime·MSpanList_Init(&h->free[i]);
@@ -79,6 +78,7 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
 {
 	uintptr n;
 	MSpan *s, *t;
+	PageID p;

 	// Try in fixed-size lists up to max.
 	for(n=npage; n < nelem(h->free); n++) {
@@ -112,18 +112,29 @@ HaveSpan:
 		mstats.mspan_sys = h->spanalloc.sys;
 		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
 		s->npages = npage;
-		runtime·MHeapMap_Set(&h->map, t->start - 1, s);
-		runtime·MHeapMap_Set(&h->map, t->start, t);
-		runtime·MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+		p = t->start;
+		if(sizeof(void*) == 8)
+			p -= ((uintptr)h->arena_start>>PageShift);
+		if(p > 0)
+			h->map[p-1] = s;
+		h->map[p] = t;
+		h->map[p+t->npages-1] = t;
+		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
 	}

+	if(*(uintptr*)(s->start<<PageShift) != 0)
+		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+
 	// Record span info, because gc needs to be
 	// able to map interior pointer to containing span.
 	s->sizeclass = sizeclass;
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= ((uintptr)h->arena_start>>PageShift);
 	for(n=0; n<npage; n++)
-		runtime·MHeapMap_Set(&h->map, s->start+n, s);
+		h->map[p+n] = s;
 	return s;
 }

@@ -161,6 +172,7 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	uintptr ask;
 	void *v;
 	MSpan *s;
+	PageID p;

 	// Ask for a big chunk, to reduce the number of mappings
 	// the operating system needs to track; also amortizes
@@ -171,29 +183,21 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	if(ask < HeapAllocChunk)
 		ask = HeapAllocChunk;

-	v = runtime·SysAlloc(ask);
+	v = runtime·MHeap_SysAlloc(h, ask);
 	if(v == nil) {
 		if(ask > (npage<<PageShift)) {
 			ask = npage<<PageShift;
-			v = runtime·SysAlloc(ask);
+			v = runtime·MHeap_SysAlloc(h, ask);
 		}
 		if(v == nil)
 			return false;
 	}
 	mstats.heap_sys += ask;

-	if((byte*)v < h->min || h->min == nil)
-		h->min = v;
-	if((byte*)v+ask > h->max)
-		h->max = (byte*)v+ask;
+	if((byte*)v < h->arena_start || h->arena_start == nil)
+		h->arena_start = v;
+	if((byte*)v+ask > h->arena_end)
+		h->arena_end = (byte*)v+ask;

-	// NOTE(rsc): In tcmalloc, if we've accumulated enough
-	// system allocations, the heap map gets entirely allocated
-	// in 32-bit mode.  (In 64-bit mode that's not practical.)
-	if(!runtime·MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
-		runtime·SysFree(v, ask);
-		return false;
-	}
-
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
@@ -201,35 +205,50 @@ MHeap_Grow(MHeap *h, uintptr npage)
 	mstats.mspan_inuse = h->spanalloc.inuse;
 	mstats.mspan_sys = h->spanalloc.sys;
 	runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
-	runtime·MHeapMap_Set(&h->map, s->start, s);
-	runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= ((uintptr)h->arena_start>>PageShift);
+	h->map[p] = s;
+	h->map[p + s->npages - 1] = s;
 	s->state = MSpanInUse;
 	MHeap_FreeLocked(h, s);
 	return true;
 }

-// Look up the span at the given page number.
-// Page number is guaranteed to be in map
+// Look up the span at the given address.
+// Address is guaranteed to be in map
 // and is guaranteed to be start or end of span.
 MSpan*
-runtime·MHeap_Lookup(MHeap *h, PageID p)
+runtime·MHeap_Lookup(MHeap *h, void *v)
 {
-	return runtime·MHeapMap_Get(&h->map, p);
+	uintptr p;
+
+	p = (uintptr)v;
+	if(sizeof(void*) == 8)
+		p -= (uintptr)h->arena_start;
+	return h->map[p >> PageShift];
 }

-// Look up the span at the given page number.
-// Page number is *not* guaranteed to be in map
+// Look up the span at the given address.
+// Address is *not* guaranteed to be in map
 // and may be anywhere in the span.
 // Map entries for the middle of a span are only
 // valid for allocated spans.  Free spans may have
 // other garbage in their middles, so we have to
 // check for that.
 MSpan*
-runtime·MHeap_LookupMaybe(MHeap *h, PageID p)
+runtime·MHeap_LookupMaybe(MHeap *h, void *v)
 {
 	MSpan *s;
+	PageID p, q;

-	s = runtime·MHeapMap_GetMaybe(&h->map, p);
+	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
+		return nil;
+	p = (uintptr)v>>PageShift;
+	q = p;
+	if(sizeof(void*) == 8)
+		q -= (uintptr)h->arena_start >> PageShift;
+	s = h->map[q];
 	if(s == nil || p < s->start || p - s->start >= s->npages)
 		return nil;
 	if(s->state != MSpanInUse)
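The two lookups differ only in how much they trust their argument. MHeap_Lookup assumes v is a span boundary already known to the map, so it indexes directly. MHeap_LookupMaybe takes an arbitrary pointer, so it first bounds-checks against [arena_start, arena_used) and then verifies that the page really falls inside the span it found, since map entries in the middle of a free span can be stale garbage, exactly as the comment above it warns.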
@@ -258,7 +277,9 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 static void
 MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
+	uintptr *sp, *tp;
 	MSpan *t;
+	PageID p;

 	if(s->state != MSpanInUse || s->ref != 0) {
 		runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
@@ -266,21 +287,30 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	}
 	s->state = MSpanFree;
 	runtime·MSpanList_Remove(s);
+	sp = (uintptr*)(s->start<<PageShift);

 	// Coalesce with earlier, later spans.
-	if((t = runtime·MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+	p = s->start;
+	if(sizeof(void*) == 8)
+		p -= (uintptr)h->arena_start >> PageShift;
+	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
+		tp = (uintptr*)(t->start<<PageShift);
+		*tp |= *sp;	// propagate "needs zeroing" mark
 		s->start = t->start;
 		s->npages += t->npages;
-		runtime·MHeapMap_Set(&h->map, s->start, s);
+		p -= t->npages;
+		h->map[p] = s;
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime·FixAlloc_Free(&h->spanalloc, t);
 		mstats.mspan_inuse = h->spanalloc.inuse;
 		mstats.mspan_sys = h->spanalloc.sys;
 	}
-	if((t = runtime·MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
+		tp = (uintptr*)(t->start<<PageShift);
+		*sp |= *tp;	// propagate "needs zeroing" mark
 		s->npages += t->npages;
-		runtime·MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+		h->map[p + s->npages - 1] = s;
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
 		runtime·FixAlloc_Free(&h->spanalloc, t);
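The new sp/tp lines implement the "needs zeroing" mark that replaces eager clearing: the first word of a free span's memory serves as the flag, MHeap_AllocLocked above clears a span only when that word is nonzero, and the two OR operations here merge the marks when spans coalesce, so the combined span still remembers whether any part of it is dirty.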
@@ -1,96 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Heap map, 32-bit version
-// See malloc.h and mheap.c for overview.
-
-#include "runtime.h"
-#include "malloc.h"
-
-// 3-level radix tree mapping page ids to Span*.
-void
-runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
-{
-	m->allocator = allocator;
-}
-
-MSpan*
-runtime·MHeapMap_Get(MHeapMap *m, PageID k)
-{
-	int32 i1, i2;
-
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	return m->p[i1]->s[i2];
-}
-
-MSpan*
-runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k)
-{
-	int32 i1, i2;
-	MHeapMapNode2 *p2;
-
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	p2 = m->p[i1];
-	if(p2 == nil)
-		return nil;
-	return p2->s[i2];
-}
-
-void
-runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
-{
-	int32 i1, i2;
-
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Set");
-
-	m->p[i1]->s[i2] = s;
-}
-
-// Allocate the storage required for entries [k, k+1, ..., k+len-1]
-// so that Get and Set calls need not check for nil pointers.
-bool
-runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
-{
-	uintptr end;
-	int32 i1;
-	MHeapMapNode2 *p2;
-
-	end = k+len;
-	while(k < end) {
-		if((k >> MHeapMap_TotalBits) != 0)
-			return false;
-		i1 = (k >> MHeapMap_Level2Bits) & MHeapMap_Level1Mask;
-
-		// first-level pointer
-		if(m->p[i1] == nil) {
-			p2 = m->allocator(sizeof *p2);
-			if(p2 == nil)
-				return false;
-			mstats.heapmap_sys += sizeof *p2;
-			m->p[i1] = p2;
-		}
-
-		// advance key past this leaf node
-		k = ((k >> MHeapMap_Level2Bits) + 1) << MHeapMap_Level2Bits;
-	}
-	return true;
-}
@@ -1,41 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Free(v) must be able to determine the MSpan containing v.
-// The MHeapMap is a 2-level radix tree mapping page numbers to MSpans.
-
-typedef struct MHeapMapNode2 MHeapMapNode2;
-
-enum
-{
-	// 32 bit address - 12 bit page size = 20 bits to map
-	MHeapMap_Level1Bits = 10,
-	MHeapMap_Level2Bits = 10,
-
-	MHeapMap_TotalBits =
-		MHeapMap_Level1Bits +
-		MHeapMap_Level2Bits,
-
-	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
-	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
-};
-
-struct MHeapMap
-{
-	void *(*allocator)(uintptr);
-	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
-};
-
-struct MHeapMapNode2
-{
-	MSpan *s[1<<MHeapMap_Level2Bits];
-};
-
-void	runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool	runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
-MSpan*	runtime·MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan*	runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void	runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
@@ -1,23 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-	mHeapMap_Level1Bits = 10
-	mHeapMap_Level2Bits = 10
-	mHeapMap_TotalBits  = mHeapMap_Level1Bits + mHeapMap_Level2Bits
-
-	mHeapMap_Level1Mask = (1 << mHeapMap_Level1Bits) - 1
-	mHeapMap_Level2Mask = (1 << mHeapMap_Level2Bits) - 1
-)
-
-type mHeapMap struct {
-	allocator func(uintptr)
-	p         [1 << mHeapMap_Level1Bits]*mHeapMapNode2
-}
-
-type mHeapMapNode2 struct {
-	s [1 << mHeapMap_Level2Bits]*mSpan
-}
@@ -1,117 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Heap map, 64-bit version
-// See malloc.h and mheap.c for overview.
-
-#include "runtime.h"
-#include "malloc.h"
-
-// 3-level radix tree mapping page ids to Span*.
-void
-runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
-{
-	m->allocator = allocator;
-}
-
-MSpan*
-runtime·MHeapMap_Get(MHeapMap *m, PageID k)
-{
-	int32 i1, i2, i3;
-
-	i3 = k & MHeapMap_Level3Mask;
-	k >>= MHeapMap_Level3Bits;
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	return m->p[i1]->p[i2]->s[i3];
-}
-
-MSpan*
-runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k)
-{
-	int32 i1, i2, i3;
-	MHeapMapNode2 *p2;
-	MHeapMapNode3 *p3;
-
-	i3 = k & MHeapMap_Level3Mask;
-	k >>= MHeapMap_Level3Bits;
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Get");
-
-	p2 = m->p[i1];
-	if(p2 == nil)
-		return nil;
-	p3 = p2->p[i2];
-	if(p3 == nil)
-		return nil;
-	return p3->s[i3];
-}
-
-void
-runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
-{
-	int32 i1, i2, i3;
-
-	i3 = k & MHeapMap_Level3Mask;
-	k >>= MHeapMap_Level3Bits;
-	i2 = k & MHeapMap_Level2Mask;
-	k >>= MHeapMap_Level2Bits;
-	i1 = k & MHeapMap_Level1Mask;
-	k >>= MHeapMap_Level1Bits;
-	if(k != 0)
-		runtime·throw("MHeapMap_Set");
-
-	m->p[i1]->p[i2]->s[i3] = s;
-}
-
-// Allocate the storage required for entries [k, k+1, ..., k+len-1]
-// so that Get and Set calls need not check for nil pointers.
-bool
-runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
-{
-	uintptr end;
-	int32 i1, i2;
-	MHeapMapNode2 *p2;
-	MHeapMapNode3 *p3;
-
-	end = k+len;
-	while(k < end) {
-		if((k >> MHeapMap_TotalBits) != 0)
-			return false;
-		i2 = (k >> MHeapMap_Level3Bits) & MHeapMap_Level2Mask;
-		i1 = (k >> (MHeapMap_Level3Bits + MHeapMap_Level2Bits)) & MHeapMap_Level1Mask;
-
-		// first-level pointer
-		if((p2 = m->p[i1]) == nil) {
-			p2 = m->allocator(sizeof *p2);
-			if(p2 == nil)
-				return false;
-			mstats.heapmap_sys += sizeof *p2;
-			m->p[i1] = p2;
-		}
-
-		// second-level pointer
-		if(p2->p[i2] == nil) {
-			p3 = m->allocator(sizeof *p3);
-			if(p3 == nil)
-				return false;
-			mstats.heapmap_sys += sizeof *p3;
-			p2->p[i2] = p3;
-		}
-
-		// advance key past this leaf node
-		k = ((k >> MHeapMap_Level3Bits) + 1) << MHeapMap_Level3Bits;
-	}
-	return true;
-}
@@ -1,60 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Free(v) must be able to determine the MSpan containing v.
-// The MHeapMap is a 3-level radix tree mapping page numbers to MSpans.
-//
-// NOTE(rsc): On a 32-bit platform (= 20-bit page numbers),
-// we can swap in a 2-level radix tree.
-//
-// NOTE(rsc): We use a 3-level tree because tcmalloc does, but
-// having only three levels requires approximately 1 MB per node
-// in the tree, making the minimum map footprint 3 MB.
-// Using a 4-level tree would cut the minimum footprint to 256 kB.
-// On the other hand, it's just virtual address space: most of
-// the memory is never going to be touched, thus never paged in.
-
-typedef struct MHeapMapNode2 MHeapMapNode2;
-typedef struct MHeapMapNode3 MHeapMapNode3;
-
-enum
-{
-	// 64 bit address - 12 bit page size = 52 bits to map
-	MHeapMap_Level1Bits = 18,
-	MHeapMap_Level2Bits = 18,
-	MHeapMap_Level3Bits = 16,
-
-	MHeapMap_TotalBits =
-		MHeapMap_Level1Bits +
-		MHeapMap_Level2Bits +
-		MHeapMap_Level3Bits,
-
-	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
-	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
-	MHeapMap_Level3Mask = (1<<MHeapMap_Level3Bits) - 1,
-};
-
-struct MHeapMap
-{
-	void *(*allocator)(uintptr);
-	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
-};
-
-struct MHeapMapNode2
-{
-	MHeapMapNode3 *p[1<<MHeapMap_Level2Bits];
-};
-
-struct MHeapMapNode3
-{
-	MSpan *s[1<<MHeapMap_Level3Bits];
-};
-
-void	runtime·MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
-bool	runtime·MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
-MSpan*	runtime·MHeapMap_Get(MHeapMap *m, PageID k);
-MSpan*	runtime·MHeapMap_GetMaybe(MHeapMap *m, PageID k);
-void	runtime·MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
@@ -1,31 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
-	mHeapMap_Level1Bits = 18
-	mHeapMap_Level2Bits = 18
-	mHeapMap_Level3Bits = 16
-	mHeapMap_TotalBits  = mHeapMap_Level1Bits + mHeapMap_Level2Bits + mHeapMap_Level3Bits
-
-	mHeapMap_Level1Mask = (1 << mHeapMap_Level1Bits) - 1
-	mHeapMap_Level2Mask = (1 << mHeapMap_Level2Bits) - 1
-	mHeapMap_Level3Mask = (1 << mHeapMap_Level3Bits) - 1
-)
-
-type mHeapMap struct {
-	allocator func(uintptr)
-	p         [1 << mHeapMap_Level1Bits]*mHeapMapNode2
-}
-
-type mHeapMapNode2 struct {
-	p [1 << mHeapMap_Level2Bits]*mHeapMapNode3
-}
-
-type mHeapMapNode3 struct {
-	s [1 << mHeapMap_Level3Bits]*mSpan
-}
@@ -88,7 +88,6 @@ func WriteHeapProfile(w io.Writer) os.Error {
 		fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
 		fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
 		fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
-		fmt.Fprintf(b, "# MHeapMapSys = %d\n", s.MHeapMapSys)
 		fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)

 		fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
@@ -1190,3 +1190,9 @@ runtime·Goroutines(int32 ret)
 	ret = runtime·sched.gcount;
 	FLUSH(&ret);
 }
+
+int32
+runtime·mcount(void)
+{
+	return runtime·sched.mcount;
+}
@@ -436,6 +436,7 @@ void	runtime·addfinalizer(void*, void(*fn)(void*), int32);
 void	runtime·walkfintab(void (*fn)(void*));
 void	runtime·runpanic(Panic*);
 void*	runtime·getcallersp(void*);
+int32	runtime·mcount(void);

 void	runtime·exit(int32);
 void	runtime·breakpoint(void);
@@ -33,12 +33,8 @@ extern void *runtime·VirtualFree;
 void*
 runtime·SysAlloc(uintptr n)
 {
-	void *v;
-
-	v = runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
-	if(v == 0)
-		abort("VirtualAlloc");
-	return v;
+	mstats.sys += n;
+	return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
 }

 void
@@ -53,13 +49,25 @@ runtime·SysFree(void *v, uintptr n)
 {
 	uintptr r;

-	USED(n);
+	mstats.sys -= n;
 	r = (uintptr)runtime·stdcall(runtime·VirtualFree, 3, v, 0, MEM_RELEASE);
 	if(r == 0)
 		abort("VirtualFree");
 }

-void
-runtime·SysMemInit(void)
+void*
+runtime·SysReserve(void *v, uintptr n)
 {
+	return runtime·stdcall(runtime·VirtualAlloc, 4, v, n, MEM_RESERVE, 0);
+}
+
+void
+runtime·SysMap(void *v, uintptr n)
+{
+	void *p;
+
+	mstats.sys += n;
+	p = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+	if(p != v)
+		runtime·throw("runtime: cannot map pages in arena address space");
 }
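The Windows port maps the same split onto VirtualAlloc: MEM_RESERVE claims address space and MEM_COMMIT at the same base backs it. A standalone Win32 sketch of that pattern (an assumption about shape, not the runtime's code):

	#include <windows.h>
	#include <stdio.h>

	int main(void) {
		SIZE_T reserve = (SIZE_T)1 << 30;
		char *base = VirtualAlloc(NULL, reserve, MEM_RESERVE, PAGE_NOACCESS);
		if (base == NULL) return 1;

		SIZE_T chunk = (SIZE_T)1 << 20;
		char *p = VirtualAlloc(base, chunk, MEM_COMMIT, PAGE_READWRITE);
		if (p != base) return 1;

		p[0] = 1;   /* committed page is usable */
		printf("reserved at %p, committed %lu bytes\n",
		       (void*)base, (unsigned long)chunk);
		return 0;
	}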
test/run
@@ -42,7 +42,9 @@ TMP2FILE=/tmp/gotest2-$$-$USER

 # don't run the machine out of memory: limit individual processes to 4GB.
 # on thresher, 3GB suffices to run the tests; with 2GB, peano fails.
-ulimit -v 4000000
+# Linux charges reserved but not mapped addresses to ulimit -v
+# so we have to use ulimit -m.
+ulimit -m 4000000

 # no core files please
 ulimit -c 0