// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "stack.h"
#include "malloc.h"
#include "defs_GOOS_GOARCH.h"
#include "type.h"

#pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */
MHeap runtime·mheap;

extern MStats mstats;	// defined in extern.go

extern volatile int32 runtime·MemProfileRate;

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
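// The flag argument is a combination of FlagNo* bits. If dogc is set,
// the allocation is allowed to trigger a garbage collection; if zeroed
// is set, the returned memory is zeroed.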
void*
runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	int32 sizeclass, rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
		runtime·gosched();
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;
	if(size == 0)
		size = 1;

	c = m->mcache;
	c->local_nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime·SizeToClass(size);
		size = runtime·class_to_size[sizeclass];
		v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime·throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1);
		if(s == nil)
			runtime·throw("out of memory");
		size = npages<<PageShift;
		c->local_alloc += size;
		c->local_total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime·markspan(v, 0, 0, true);
	}
	if(!(flag & FlagNoGC))
		runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);

	m->mallocing = 0;
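
	// Sample this allocation for the memory profiler?
	// next_sample counts down the bytes allocated since the last
	// sample; it is drawn uniformly from [0, 2*rate), so on average
	// one allocation is sampled per MemProfileRate bytes.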
	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size >= rate)
			goto profile;
		if(m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime·fastrand1() % (2*rate);
		profile:
			runtime·setblockspecial(v, true);
			runtime·MProf_Malloc(v, size);
		}
	}

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);
	return v;
}

void*
runtime·malloc(uintptr size)
{
	return runtime·mallocgc(size, 0, 0, 1);
}

// Free the object whose base pointer is v.
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = runtime·blockspecial(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime·markfreed(v, size);
		runtime·unmarkspan(v, 1<<PageShift);
		runtime·MHeap_Free(&runtime·mheap, s, 1);
	} else {
		// Small object.
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime·markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	c->local_alloc -= size;
	if(prof)
		runtime·MProf_Free(v, size);
	m->mallocing = 0;
}
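
// mlookup reports whether v points into an allocated heap block,
// returning the block's base address in *base, its size in *size,
// and its span in *sp (each only if the pointer is non-nil).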
int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	m->mcache->local_nlookup++;
	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime·checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

	n = runtime·class_to_size[s->sizeclass];
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}
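
// allocmcache allocates a per-M cache from the heap's fixed-size
// allocator. The first profiling sample is seeded here with the same
// computation used in mallocgc above.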
MCache*
runtime·allocmcache(void)
{
	int32 rate;
	MCache *c;

	runtime·lock(&runtime·mheap);
	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime·mheap.cachealloc.sys;
	runtime·unlock(&runtime·mheap);

	// Set first allocation sample size.
	rate = runtime·MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime·fastrand1() % (2*rate);

	return c;
}
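
// purgecachedstats flushes m's cached allocation statistics into the
// global mstats and resets the per-cache counters.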
void
runtime·purgecachedstats(M* m)
{
	MCache *c;

	// Protected by either heap or GC lock.
	c = m->mcache;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.heap_objects += c->local_objects;
	c->local_objects = 0;
	mstats.nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	mstats.nfree += c->local_nfree;
	c->local_nfree = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	mstats.alloc += c->local_alloc;
	c->local_alloc = 0;
	mstats.total_alloc += c->local_total_alloc;
	c->local_total_alloc = 0;
}

uintptr runtime·sizeof_C_MStats = sizeof(MStats);

#define MaxArena32 (2U<<30)

void
runtime·mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size;
	extern byte end[];
	byte *want;
	uintptr limit;

	p = nil;
	arena_size = 0;
	bitmap_size = 0;

	// for 64-bit build
	USED(p);
	USED(arena_size);
	USED(bitmap_size);

	runtime·InitSizes();

	limit = runtime·memlimit();

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 16 GB should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000f800000000 if possible.
		// Allocating a 16 GB region takes away 36 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means
		// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb.
		// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and
		// they are otherwise as far from ff (likely a common byte) as possible.
		// Choosing 0x00 for the leading 6 bits was more arbitrary, but it
		// is not a common ASCII code point either. Using 0x11f8 instead
		// caused out of memory errors on OS X during thread allocations.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
		// but it hardly matters: fc is not valid UTF-8 either, and we have to
		// allocate 15 GB before we get that far.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		arena_size = 16LL<<30;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		p = runtime·SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
	}
	if(p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		if(limit > 0 && arena_size+bitmap_size > limit) {
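			// The bitmap uses 4 bits per word of arena, i.e. 1 byte
			// per 8 bytes of arena on a 32-bit system, so arena_size
			// is 8*bitmap_size and the two together cost 9 units per
			// unit of bitmap. Divide the limit by 9 and round down
			// to a page boundary to fit both under the limit.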
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
		}

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
		p = runtime·SysReserve(want, bitmap_size + arena_size);
		if(p == nil)
			runtime·throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime·throw("runtime: SysReserve returned unaligned address");

	runtime·mheap.bitmap = p;
	runtime·mheap.arena_start = p + bitmap_size;
	runtime·mheap.arena_used = runtime·mheap.arena_start;
	runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(1));
}
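
// MHeap_SysAlloc obtains n more bytes for the heap. It first tries to
// extend the existing arena reservation (see #3173) and only then
// accepts mappings at OS-chosen addresses, hoping they land in range.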
void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > h->arena_end - h->arena_used) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		// Round wanted arena size to a multiple of 256MB.
		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime·SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
	if(n <= h->arena_end - h->arena_used) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime·SysMap(p, n);
		h->arena_used += n;
		runtime·MHeap_MapBits(h);
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime·SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime·SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime·MHeap_MapBits(h);
	}

	return p;
}

// Runtime stubs.

void*
runtime·mal(uintptr n)
{
	return runtime·mallocgc(n, 0, 1, 1);
}
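
// new is the runtime implementation of the built-in function new:
// it allocates a zeroed value of the given type, marking it as
// pointer-free for the garbage collector when possible.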
func new(typ *Type) (ret *uint8) {
	uint32 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
	ret = runtime·mallocgc(typ->size, flag, 1, 1);
	FLUSH(&ret);
}

void*
runtime·stackalloc(uint32 n)
{
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	if(g != m->g0)
		runtime·throw("stackalloc not on scheduler stack");

	// Stack allocator uses malloc/free most of the time,
	// but if we're in the middle of malloc and need stack,
	// we have to do something else to avoid deadlock.
	// In that case, we fall back on a fixed-size free-list
	// allocator, assuming that inside malloc all the stack
	// frames are small, so that all the stack allocations
	// will be a single size, the minimum (right now, 5k).
	if(m->mallocing || m->gcing || n == FixedStack) {
		if(n != FixedStack) {
			runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
			runtime·throw("stackalloc");
		}
		return runtime·FixAlloc_Alloc(m->stackalloc);
	}
	return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
}
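
// stackfree must match stackalloc: stacks of size FixedStack came
// from the fixed-size allocator and go back to it; all others were
// obtained from mallocgc and are released with free.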
void
runtime·stackfree(void *v, uintptr n)
{
	if(m->mallocing || m->gcing || n == FixedStack) {
		runtime·FixAlloc_Free(m->stackalloc, v);
		return;
	}
	runtime·free(v);
}

func GC() {
	runtime·gc(1);
}
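
// SetFinalizer checks that obj points to the beginning of an allocated
// block and that finalizer is a func taking exactly that pointer type;
// nret records the bytes needed for the finalizer's return values when
// the call is eventually made.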
func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	FuncType *ft;
	int32 i, nret;
	Type *t;

	if(obj.type == nil) {
		runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.type->kind != KindPtr) {
		runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
		runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	nret = 0;
	if(finalizer.type != nil) {
		if(finalizer.type->kind != KindFunc)
			goto badfunc;
		ft = (FuncType*)finalizer.type;
		if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
			goto badfunc;

		// compute size needed for return parameters
		for(i=0; i<ft->out.len; i++) {
			t = ((Type**)ft->out.array)[i];
			nret = (nret + t->align - 1) & ~(t->align - 1);
			nret += t->size;
		}
		nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
	}

	if(!runtime·addfinalizer(obj.data, finalizer.data, nret)) {
		runtime·printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
throw:
	runtime·throw("runtime.SetFinalizer");
}