// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "stack.h"
#include "malloc.h"
#include "defs_GOOS_GOARCH.h"
#include "type.h"
#include "typekind.h"
#include "race.h"

#pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */
MHeap runtime·mheap;

int32	runtime·checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime·MemProfileRate;

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	int32 sizeclass;
	intgo rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
		runtime·gosched();
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;
	if(size == 0)
		size = 1;

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);

	c = m->mcache;
	c->local_nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		sizeclass = runtime·SizeToClass(size);
		size = runtime·class_to_size[sizeclass];
		v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime·throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, zeroed);
		if(s == nil)
			runtime·throw("out of memory");
		size = npages<<PageShift;
		c->local_alloc += size;
		c->local_total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime·markspan(v, 0, 0, true);
	}

	if(sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime·lock(&runtime·mheap);
		runtime·purgecachedstats(c);
		runtime·unlock(&runtime·mheap);
	}

	if(!(flag & FlagNoGC))
		runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;

	m->mallocing = 0;

	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size >= rate)
			goto profile;
		if(m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime·fastrand1() % (2*rate);
		profile:
			runtime·setblockspecial(v, true);
			runtime·MProf_Malloc(v, size);
		}
	}

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);

	if(raceenabled) {
		runtime·racemalloc(v, size, m->racepc);
		m->racepc = nil;
	}
	return v;
}

void*
runtime·malloc(uintptr size)
{
	return runtime·mallocgc(size, 0, 0, 1);
}
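// Worked example of the two paths above (illustrative only; the exact
// size-class boundaries live in msize.c and the 32-byte class here is an
// assumption): a 28-byte request takes the small path, where
//	sizeclass = runtime·SizeToClass(28);      // e.g. the 32-byte class
//	size = runtime·class_to_size[sizeclass];  // e.g. 32
// so 32 bytes come off the mcache free list and 4 bytes are internal
// fragmentation. A 40000-byte request (> MaxSmallSize) takes the large
// path: assuming the usual 4096-byte page (PageShift = 12),
// 40000>>PageShift = 9 with a nonzero remainder, so 10 whole pages
// (40960 bytes) are taken straight from the heap.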
// Free the object whose base pointer is v.
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = runtime·blockspecial(v);

	if(raceenabled)
		runtime·racefree(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other blocks and change the bitmap further.
		runtime·markfreed(v, size);
		runtime·unmarkspan(v, 1<<PageShift);
		runtime·MHeap_Free(&runtime·mheap, s, 1);
	} else {
		// Small object.
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime·markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	c->local_nfree++;
	c->local_alloc -= size;
	if(prof)
		runtime·MProf_Free(v, size);
	m->mallocing = 0;
}

int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	m->mcache->local_nlookup++;
	if(sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime·lock(&runtime·mheap);
		runtime·purgecachedstats(m->mcache);
		runtime·unlock(&runtime·mheap);
	}

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime·checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

	n = s->elemsize;
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}

MCache*
runtime·allocmcache(void)
{
	intgo rate;
	MCache *c;

	runtime·lock(&runtime·mheap);
	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime·mheap.cachealloc.sys;
	runtime·unlock(&runtime·mheap);
	runtime·memclr((byte*)c, sizeof(*c));

	// Set first allocation sample size.
	rate = runtime·MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime·fastrand1() % (2*rate);

	return c;
}

void
runtime·freemcache(MCache *c)
{
	runtime·MCache_ReleaseAll(c);
	runtime·lock(&runtime·mheap);
	runtime·purgecachedstats(c);
	runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
	runtime·unlock(&runtime·mheap);
}

void
runtime·purgecachedstats(MCache *c)
{
	// Protected by either heap or GC lock.
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.heap_objects += c->local_objects;
	c->local_objects = 0;
	mstats.nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	mstats.nfree += c->local_nfree;
	c->local_nfree = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	mstats.alloc += c->local_alloc;
	c->local_alloc = 0;
	mstats.total_alloc += c->local_total_alloc;
	c->local_total_alloc = 0;
}

uintptr runtime·sizeof_C_MStats = sizeof(MStats);

#define MaxArena32 (2U<<30)
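// Bitmap sizing, worked through (illustrative; derived from the
// expression arena_size / (sizeof(void*)*8/4) used in mallocinit below):
// the heap bitmap records 4 bits per word, so on 64-bit the divisor is
// 8*8/4 = 16 and a 128 GB arena needs an 8 GB bitmap, which is where the
// "136 GB" reservation mentioned below comes from. On 32-bit the divisor
// is 4*8/4 = 8, so the 2 GB MaxArena32 needs a 256 MB bitmap.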
void
runtime·mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size;
	extern byte end[];
	byte *want;
	uintptr limit;

	p = nil;
	arena_size = 0;
	bitmap_size = 0;

	// for 64-bit build
	USED(p);
	USED(arena_size);
	USED(bitmap_size);

	runtime·InitSizes();

	limit = runtime·memlimit();

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.  The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000c000000000 if possible.
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose.  Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00.  None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible.  An earlier attempt to use 0x11f8
		// caused out of memory errors on OS X during thread allocations.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		p = runtime·SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		if(limit > 0 && arena_size+bitmap_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
		}

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement.  If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer.  Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
		p = runtime·SysReserve(want, bitmap_size + arena_size);
		if(p == nil)
			runtime·throw("runtime: cannot reserve arena virtual address space");
		if(p != want)
			runtime·printf("runtime: SysReserve(%p, %p) returned %p\n", want, bitmap_size+arena_size, p);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime·throw("runtime: SysReserve returned unaligned address");

	runtime·mheap.bitmap = p;
	runtime·mheap.arena_start = p + bitmap_size;
	runtime·mheap.arena_used = runtime·mheap.arena_start;
	runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(1));
}
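// Reservation growth, worked through (illustrative): when the 32-bit
// heap needs more than its current reservation, MHeap_SysAlloc below
// rounds the shortfall up to a 256 MB multiple:
//	needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
// so a 1 MB shortfall becomes a 256 MB extension of arena_end, capped
// at arena_start+MaxArena32.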
void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > h->arena_end - h->arena_used) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		// Round wanted arena size to a multiple of 256MB.
		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime·SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
	if(n <= h->arena_end - h->arena_used) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime·SysMap(p, n);
		h->arena_used += n;
		runtime·MHeap_MapBits(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime·SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime·SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime·MHeap_MapBits(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
	}

	return p;
}

static Lock settype_lock;

void
runtime·settype_flush(M *mp, bool sysalloc)
{
	uintptr *buf, *endbuf;
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	uintptr *data2;
	byte *data3;
	bool sysalloc3;
	void *v;
	uintptr typ, p;
	MSpan *s;

	buf = mp->settype_buf;
	endbuf = buf + mp->settype_bufsize;

	runtime·lock(&settype_lock);
	while(buf < endbuf) {
		v = (void*)*buf;
		*buf = 0;
		buf++;
		typ = *buf;
		buf++;

		// (Manually inlined copy of runtime·MHeap_Lookup)
		p = (uintptr)v>>PageShift;
		if(sizeof(void*) == 8)
			p -= (uintptr)runtime·mheap.arena_start >> PageShift;
		s = runtime·mheap.map[p];

		if(s->sizeclass == 0) {
			s->types.compression = MTypes_Single;
			s->types.data = typ;
			continue;
		}

		size = s->elemsize;
		ofs = ((uintptr)v - (s->start<<PageShift)) / size;

		switch(s->types.compression) {
		case MTypes_Empty:
			ntypes = (s->npages << PageShift) / size;
			nbytes3 = 8*sizeof(uintptr) + 1*ntypes;

			if(!sysalloc) {
				data3 = runtime·mallocgc(nbytes3, FlagNoPointers, 0, 1);
			} else {
				data3 = runtime·SysAlloc(nbytes3);
				if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
			}

			s->types.compression = MTypes_Bytes;
			s->types.sysalloc = sysalloc;
			s->types.data = (uintptr)data3;

			((uintptr*)data3)[1] = typ;
			data3[8*sizeof(uintptr) + ofs] = 1;
			break;

		case MTypes_Words:
			((uintptr*)s->types.data)[ofs] = typ;
			break;

		case MTypes_Bytes:
			data3 = (byte*)s->types.data;
			for(j=1; j<8; j++) {
				if(((uintptr*)data3)[j] == typ) {
					break;
				}
				if(((uintptr*)data3)[j] == 0) {
					((uintptr*)data3)[j] = typ;
					break;
				}
			}
			if(j < 8) {
				data3[8*sizeof(uintptr) + ofs] = j;
			} else {
				ntypes = (s->npages << PageShift) / size;
				nbytes2 = ntypes * sizeof(uintptr);

				if(!sysalloc) {
					data2 = runtime·mallocgc(nbytes2, FlagNoPointers, 0, 1);
				} else {
					data2 = runtime·SysAlloc(nbytes2);
					if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
				}

				sysalloc3 = s->types.sysalloc;

				s->types.compression = MTypes_Words;
				s->types.sysalloc = sysalloc;
				s->types.data = (uintptr)data2;

				// Move the contents of data3 to data2. Then deallocate data3.
				for(j=0; j<ntypes; j++) {
					t = data3[8*sizeof(uintptr) + j];
					t = ((uintptr*)data3)[t];
					data2[j] = t;
				}
				if(sysalloc3) {
					nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
					if(0) runtime·printf("settype.(3->2): SysFree(%p,%x)\n", data3, (uint32)nbytes3);
					runtime·SysFree(data3, nbytes3);
				}

				data2[ofs] = typ;
			}
			break;
		}
	}
	runtime·unlock(&settype_lock);

	mp->settype_bufsize = 0;
}
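// Layout sketch of the MTypes_Bytes encoding used above (derived from
// the code, as an aid to reading it): s->types.data points at
//
//	[8]uintptr    type dictionary (slot 0 left zero, meaning "unknown")
//	[ntypes]byte  per-object index into the dictionary
//
// so up to 7 distinct types per span cost one byte per object. When an
// eighth distinct type shows up, the span is re-encoded as MTypes_Words,
// one full uintptr type word per object.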
// It is forbidden to use this function if it is possible that
// explicit deallocation via calling runtime·free(v) may happen.
void
runtime·settype(void *v, uintptr t)
{
	M *mp;
	uintptr *buf;
	uintptr i;
	MSpan *s;

	if(t == 0)
		runtime·throw("settype: zero type");

	mp = m;
	buf = mp->settype_buf;
	i = mp->settype_bufsize;
	buf[i+0] = (uintptr)v;
	buf[i+1] = t;
	i += 2;
	mp->settype_bufsize = i;

	if(i == nelem(mp->settype_buf)) {
		runtime·settype_flush(mp, false);
	}

	if(DebugTypeAtBlockEnd) {
		s = runtime·MHeap_Lookup(&runtime·mheap, v);
		*(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
	}
}

void
runtime·settype_sysfree(MSpan *s)
{
	uintptr ntypes, nbytes;

	if(!s->types.sysalloc)
		return;

	nbytes = (uintptr)-1;

	switch (s->types.compression) {
	case MTypes_Words:
		ntypes = (s->npages << PageShift) / s->elemsize;
		nbytes = ntypes * sizeof(uintptr);
		break;
	case MTypes_Bytes:
		ntypes = (s->npages << PageShift) / s->elemsize;
		nbytes = 8*sizeof(uintptr) + 1*ntypes;
		break;
	}

	if(nbytes != (uintptr)-1) {
		if(0) runtime·printf("settype: SysFree(%p,%x)\n", (void*)s->types.data, (uint32)nbytes);
		runtime·SysFree((void*)s->types.data, nbytes);
	}
}

uintptr
runtime·gettype(void *v)
{
	MSpan *s;
	uintptr t, ofs;
	byte *data;

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(s != nil) {
		t = 0;
		switch(s->types.compression) {
		case MTypes_Empty:
			break;
		case MTypes_Single:
			t = s->types.data;
			break;
		case MTypes_Words:
			ofs = (uintptr)v - (s->start<<PageShift);
			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
			break;
		case MTypes_Bytes:
			ofs = (uintptr)v - (s->start<<PageShift);
			data = (byte*)s->types.data;
			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
			t = ((uintptr*)data)[t];
			break;
		default:
			runtime·throw("runtime·gettype: invalid compression kind");
		}
		if(0) {
			runtime·lock(&settype_lock);
			runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
			runtime·unlock(&settype_lock);
		}
		return t;
	}
	return 0;
}

// Runtime stubs.

void*
runtime·mal(uintptr n)
{
	return runtime·mallocgc(n, 0, 1, 1);
}

#pragma textflag 7
void
runtime·new(Type *typ, uint8 *ret)
{
	uint32 flag;

	if(raceenabled)
		m->racepc = runtime·getcallerpc(&typ);

	if(typ->size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		ret = (uint8*)&runtime·zerobase;
	} else {
		flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
		ret = runtime·mallocgc(typ->size, flag, 1, 1);

		if(UseSpanType && !flag) {
			if(false) {
				runtime·printf("new %S: %p\n", *typ->string, ret);
			}
			runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
		}
	}

	FLUSH(&ret);
}

// same as runtime·new, but callable from C
void*
runtime·cnew(Type *typ)
{
	uint32 flag;
	void *ret;

	if(raceenabled)
		m->racepc = runtime·getcallerpc(&typ);

	if(typ->size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		ret = (uint8*)&runtime·zerobase;
	} else {
		flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
		ret = runtime·mallocgc(typ->size, flag, 1, 1);

		if(UseSpanType && !flag) {
			if(false) {
				runtime·printf("new %S: %p\n", *typ->string, ret);
			}
			runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
		}
	}

	return ret;
}

typedef struct StackCacheNode StackCacheNode;
struct StackCacheNode
{
	StackCacheNode *next;
	void*	batch[StackCacheBatch-1];
};

static StackCacheNode *stackcache;
static Lock stackcachemu;
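// Memory layout of one refill batch, sketched from stackcacherefill
// below: a single SysAlloc of FixedStack*StackCacheBatch bytes is split
// into StackCacheBatch chunks of FixedStack bytes each; chunk 0 doubles
// as the StackCacheNode header, and batch[] records the addresses of
// chunks 1..StackCacheBatch-1.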
// stackcacherefill/stackcacherelease implement global cache of stack segments.
// The cache is required to prevent unlimited growth of per-thread caches.
static void
stackcacherefill(void)
{
	StackCacheNode *n;
	int32 i, pos;

	runtime·lock(&stackcachemu);
	n = stackcache;
	if(n)
		stackcache = n->next;
	runtime·unlock(&stackcachemu);
	if(n == nil) {
		n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch);
		if(n == nil)
			runtime·throw("out of memory (stackcacherefill)");
		runtime·xadd64(&mstats.stacks_sys, FixedStack*StackCacheBatch);
		for(i = 0; i < StackCacheBatch-1; i++)
			n->batch[i] = (byte*)n + (i+1)*FixedStack;
	}
	pos = m->stackcachepos;
	for(i = 0; i < StackCacheBatch-1; i++) {
		m->stackcache[pos] = n->batch[i];
		pos = (pos + 1) % StackCacheSize;
	}
	m->stackcache[pos] = n;
	pos = (pos + 1) % StackCacheSize;
	m->stackcachepos = pos;
	m->stackcachecnt += StackCacheBatch;
}

static void
stackcacherelease(void)
{
	StackCacheNode *n;
	uint32 i, pos;

	pos = (m->stackcachepos - m->stackcachecnt) % StackCacheSize;
	n = (StackCacheNode*)m->stackcache[pos];
	pos = (pos + 1) % StackCacheSize;
	for(i = 0; i < StackCacheBatch-1; i++) {
		n->batch[i] = m->stackcache[pos];
		pos = (pos + 1) % StackCacheSize;
	}
	m->stackcachecnt -= StackCacheBatch;
	runtime·lock(&stackcachemu);
	n->next = stackcache;
	stackcache = n;
	runtime·unlock(&stackcachemu);
}
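// Index arithmetic, worked through (illustrative; the concrete values of
// StackCacheSize and StackCacheBatch live in malloc.h, so 32 and 16 here
// are assumptions): with stackcachepos = 5 and stackcachecnt = 20,
// (5 - 20) % 32 wraps around (unsigned) to 17, the slot holding the
// oldest cached segment. stackcacherelease then drains StackCacheBatch
// consecutive entries starting there back to the global list, so the
// most recently used segments stay in the per-thread cache.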
void*
runtime·stackalloc(uint32 n)
{
	uint32 pos;
	void *v;

	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	if(g != m->g0)
		runtime·throw("stackalloc not on scheduler stack");

	// Stack allocator uses malloc/free most of the time,
	// but if we're in the middle of malloc and need stack,
	// we have to do something else to avoid deadlock.
	// In that case, we fall back on a fixed-size free-list
	// allocator, assuming that inside malloc all the stack
	// frames are small, so that all the stack allocations
	// will be a single size, the minimum (right now, 5k).
	if(n == FixedStack || m->mallocing || m->gcing) {
		if(n != FixedStack) {
			runtime·printf("stackalloc: in malloc, size=%d want %d\n", FixedStack, n);
			runtime·throw("stackalloc");
		}
		if(m->stackcachecnt == 0)
			stackcacherefill();
		pos = m->stackcachepos;
		pos = (pos - 1) % StackCacheSize;
		v = m->stackcache[pos];
		m->stackcachepos = pos;
		m->stackcachecnt--;
		m->stackinuse++;
		return v;
	}
	return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
}

void
runtime·stackfree(void *v, uintptr n)
{
	uint32 pos;

	if(n == FixedStack || m->mallocing || m->gcing) {
		if(m->stackcachecnt == StackCacheSize)
			stackcacherelease();
		pos = m->stackcachepos;
		m->stackcache[pos] = v;
		m->stackcachepos = (pos + 1) % StackCacheSize;
		m->stackcachecnt++;
		m->stackinuse--;
		return;
	}
	runtime·free(v);
}

func GC() {
	runtime·gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	FuncType *ft;
	int32 i;
	uintptr nret;
	Type *t;

	if(obj.type == nil) {
		runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.type->kind != KindPtr) {
		runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
		runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	nret = 0;
	if(finalizer.type != nil) {
		if(finalizer.type->kind != KindFunc)
			goto badfunc;
		ft = (FuncType*)finalizer.type;
		if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
			goto badfunc;

		// compute size needed for return parameters
		for(i=0; i<ft->out.len; i++) {
			t = ((Type**)ft->out.array)[i];
			nret = (nret + t->align - 1) & ~(t->align - 1);
			nret += t->size;
		}
		nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
	}

	if(!runtime·addfinalizer(obj.data, finalizer.data, nret)) {
		runtime·printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
throw:
	runtime·throw("runtime.SetFinalizer");
}
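// nret, worked through (illustrative; 64-bit sizes and alignments
// assumed): for a finalizer func(*T) (int32, int64), the loop above
// computes
//	int32: nret = (0+3)&~3 = 0, then nret += 4  -> 4
//	int64: nret = (4+7)&~7 = 8, then nret += 8  -> 16
// and the final pointer-size round-up leaves nret = 16, the number of
// bytes the runtime reserves for the finalizer's return values.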