// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
#include "malloc.h"
#include "../../cmd/ld/textflag.h"

enum
{
	// Errno value returned (encoded in the mmap result) when the
	// kernel cannot satisfy an allocation request.
	ENOMEM = 12,
};
#pragma textflag NOSPLIT
|
2011-12-12 16:10:11 -07:00
|
|
|
void*
|
2014-08-29 22:54:40 -06:00
|
|
|
runtime·sysAlloc(uintptr n, uint64 *stat)
|
2011-12-12 16:10:11 -07:00
|
|
|
{
|
|
|
|
void *v;
|
|
|
|
|
2013-02-24 07:47:22 -07:00
|
|
|
v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
|
2011-12-12 16:10:11 -07:00
|
|
|
if(v < (void*)4096)
|
|
|
|
return nil;
|
runtime: account for all sys memory in MemStats
Currently lots of sys allocations are not accounted in any of XxxSys,
including GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% can unaccounted.
This change introduces 2 new stats: GCSys and OtherSys for GC metadata
and all other misc allocations, respectively.
Also ensures that all XxxSys indeed sum up to Sys. All sys memory allocation
functions require the stat for accounting, so that it's impossible to miss something.
Also fix updating of mcache_sys/inuse, they were not updated after deallocation.
test/bench/garbage/parser before:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14204928
MCacheSys 16384
BuckHashSys 1439992
after:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14188544
MCacheSys 16384
BuckHashSys 3194304
GCSys 39198688
OtherSys 3129656
Fixes #5799.
R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
2013-09-06 14:55:40 -06:00
|
|
|
runtime·xadd64(stat, n);
|
2011-12-12 16:10:11 -07:00
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
runtime·SysUnused(void *v, uintptr n)
|
|
|
|
{
|
2012-11-26 04:34:01 -07:00
|
|
|
runtime·madvise(v, n, MADV_FREE);
|
2011-12-12 16:10:11 -07:00
|
|
|
}
|
|
|
|
|
2013-08-14 11:54:07 -06:00
|
|
|
void
|
|
|
|
runtime·SysUsed(void *v, uintptr n)
|
|
|
|
{
|
|
|
|
USED(v);
|
|
|
|
USED(n);
|
|
|
|
}
|
|
|
|
|
2011-12-12 16:10:11 -07:00
|
|
|
void
|
runtime: account for all sys memory in MemStats
Currently lots of sys allocations are not accounted in any of XxxSys,
including GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% can unaccounted.
This change introduces 2 new stats: GCSys and OtherSys for GC metadata
and all other misc allocations, respectively.
Also ensures that all XxxSys indeed sum up to Sys. All sys memory allocation
functions require the stat for accounting, so that it's impossible to miss something.
Also fix updating of mcache_sys/inuse, they were not updated after deallocation.
test/bench/garbage/parser before:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14204928
MCacheSys 16384
BuckHashSys 1439992
after:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14188544
MCacheSys 16384
BuckHashSys 3194304
GCSys 39198688
OtherSys 3129656
Fixes #5799.
R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
2013-09-06 14:55:40 -06:00
|
|
|
runtime·SysFree(void *v, uintptr n, uint64 *stat)
|
2011-12-12 16:10:11 -07:00
|
|
|
{
|
runtime: account for all sys memory in MemStats
Currently lots of sys allocations are not accounted in any of XxxSys,
including GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% can unaccounted.
This change introduces 2 new stats: GCSys and OtherSys for GC metadata
and all other misc allocations, respectively.
Also ensures that all XxxSys indeed sum up to Sys. All sys memory allocation
functions require the stat for accounting, so that it's impossible to miss something.
Also fix updating of mcache_sys/inuse, they were not updated after deallocation.
test/bench/garbage/parser before:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14204928
MCacheSys 16384
BuckHashSys 1439992
after:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14188544
MCacheSys 16384
BuckHashSys 3194304
GCSys 39198688
OtherSys 3129656
Fixes #5799.
R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
2013-09-06 14:55:40 -06:00
|
|
|
runtime·xadd64(stat, -(uint64)n);
|
2011-12-12 16:10:11 -07:00
|
|
|
runtime·munmap(v, n);
|
|
|
|
}
|
|
|
|
|
runtime: grow stack by copying
On stack overflow, if all frames on the stack are
copyable, we copy the frames to a new stack twice
as large as the old one. During GC, if a G is using
less than 1/4 of its stack, copy the stack to a stack
half its size.
TODO
- Do something about C frames. When a C frame is in the
stack segment, it isn't copyable. We allocate a new segment
in this case.
- For idempotent C code, we can abort it, copy the stack,
then retry. I'm working on a separate CL for this.
- For other C code, we can raise the stackguard
to the lowest Go frame so the next call that Go frame
makes triggers a copy, which will then succeed.
- Pick a starting stack size?
The plan is that eventually we reach a point where the
stack contains only copyable frames.
LGTM=rsc
R=dvyukov, rsc
CC=golang-codereviews
https://golang.org/cl/54650044
2014-02-27 00:28:44 -07:00
|
|
|
void
|
|
|
|
runtime·SysFault(void *v, uintptr n)
|
|
|
|
{
|
2014-03-13 09:04:00 -06:00
|
|
|
runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
|
runtime: grow stack by copying
On stack overflow, if all frames on the stack are
copyable, we copy the frames to a new stack twice
as large as the old one. During GC, if a G is using
less than 1/4 of its stack, copy the stack to a stack
half its size.
TODO
- Do something about C frames. When a C frame is in the
stack segment, it isn't copyable. We allocate a new segment
in this case.
- For idempotent C code, we can abort it, copy the stack,
then retry. I'm working on a separate CL for this.
- For other C code, we can raise the stackguard
to the lowest Go frame so the next call that Go frame
makes triggers a copy, which will then succeed.
- Pick a starting stack size?
The plan is that eventually we reach a point where the
stack contains only copyable frames.
LGTM=rsc
R=dvyukov, rsc
CC=golang-codereviews
https://golang.org/cl/54650044
2014-02-27 00:28:44 -07:00
|
|
|
}
|
|
|
|
|
2011-12-12 16:10:11 -07:00
|
|
|
void*
|
2014-03-25 14:22:19 -06:00
|
|
|
runtime·SysReserve(void *v, uintptr n, bool *reserved)
|
2011-12-12 16:10:11 -07:00
|
|
|
{
|
|
|
|
void *p;
|
|
|
|
|
|
|
|
// On 64-bit, people with ulimit -v set complain if we reserve too
|
|
|
|
// much address space. Instead, assume that the reservation is okay
|
|
|
|
// and check the assumption in SysMap.
|
2014-03-25 14:22:19 -06:00
|
|
|
if(sizeof(void*) == 8 && n > 1LL<<32) {
|
|
|
|
*reserved = false;
|
2011-12-12 16:10:11 -07:00
|
|
|
return v;
|
2014-03-25 14:22:19 -06:00
|
|
|
}
|
2011-12-12 16:10:11 -07:00
|
|
|
|
|
|
|
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
|
2013-03-22 09:15:52 -06:00
|
|
|
if(p < (void*)4096)
|
2011-12-12 16:10:11 -07:00
|
|
|
return nil;
|
2014-03-25 14:22:19 -06:00
|
|
|
*reserved = true;
|
2013-03-17 19:18:49 -06:00
|
|
|
return p;
|
2011-12-12 16:10:11 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2014-03-25 14:22:19 -06:00
|
|
|
runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
|
2011-12-12 16:10:11 -07:00
|
|
|
{
|
|
|
|
void *p;
|
|
|
|
|
runtime: account for all sys memory in MemStats
Currently lots of sys allocations are not accounted in any of XxxSys,
including GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% can unaccounted.
This change introduces 2 new stats: GCSys and OtherSys for GC metadata
and all other misc allocations, respectively.
Also ensures that all XxxSys indeed sum up to Sys. All sys memory allocation
functions require the stat for accounting, so that it's impossible to miss something.
Also fix updating of mcache_sys/inuse, they were not updated after deallocation.
test/bench/garbage/parser before:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14204928
MCacheSys 16384
BuckHashSys 1439992
after:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14188544
MCacheSys 16384
BuckHashSys 3194304
GCSys 39198688
OtherSys 3129656
Fixes #5799.
R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
2013-09-06 14:55:40 -06:00
|
|
|
runtime·xadd64(stat, n);
|
2011-12-12 16:10:11 -07:00
|
|
|
|
|
|
|
// On 64-bit, we don't actually have v reserved, so tread carefully.
|
2014-03-25 14:22:19 -06:00
|
|
|
if(!reserved) {
|
2013-02-24 07:47:22 -07:00
|
|
|
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
|
2013-03-17 19:18:49 -06:00
|
|
|
if(p == (void*)ENOMEM)
|
2011-12-12 16:10:11 -07:00
|
|
|
runtime·throw("runtime: out of memory");
|
|
|
|
if(p != v) {
|
|
|
|
runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
|
|
|
|
runtime·throw("runtime: address space conflict");
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-02-24 07:47:22 -07:00
|
|
|
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
|
2013-03-17 19:18:49 -06:00
|
|
|
if(p == (void*)ENOMEM)
|
2011-12-12 16:10:11 -07:00
|
|
|
runtime·throw("runtime: out of memory");
|
|
|
|
if(p != v)
|
|
|
|
runtime·throw("runtime: cannot map pages in arena address space");
|
|
|
|
}
|