mirror of https://github.com/golang/go
1665b006a5
On stack overflow, if all frames on the stack are copyable, we copy the frames to a new stack twice as large as the old one. During GC, if a G is using less than 1/4 of its stack, copy the stack to a stack half its size.

TODO
- Do something about C frames. When a C frame is in the stack segment, it isn't copyable. We allocate a new segment in this case.
  - For idempotent C code, we can abort it, copy the stack, then retry. I'm working on a separate CL for this.
  - For other C code, we can raise the stackguard to the lowest Go frame so the next call that Go frame makes triggers a copy, which will then succeed.
- Pick a starting stack size?

The plan is that eventually we reach a point where the stack contains only copyable frames.

LGTM=rsc
R=dvyukov, rsc
CC=golang-codereviews
https://golang.org/cl/54650044
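As a rough illustration, the sizing policy described above can be sketched in a few lines of C. The function below is hypothetical; its name, signature, and thresholds come from the prose of the commit message, not from the runtime's actual stack code:

#include <stdint.h>

// Hypothetical sketch of the sizing policy in the commit message:
// double the stack when it overflows, halve it when GC finds less
// than a quarter of it in use.
static uintptr_t
nextstacksize(uintptr_t size, uintptr_t used, int overflow)
{
	if(overflow)
		return size*2;	// all frames copyable: copy to a stack twice as large
	if(used < size/4)
		return size/2;	// G uses <1/4 of its stack: copy to a stack half the size
	return size;	// otherwise keep the current stack
}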
96 lines
2.0 KiB
C
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
#include "malloc.h"

enum
{
	// errno value reported by runtime·mmap on failure; defined here
	// because the runtime does not include errno.h.
	ENOMEM = 12,
};

// SysAlloc obtains a region of zeroed memory from the operating system
// and charges it to the given memory statistic.
void*
runtime·SysAlloc(uintptr n, uint64 *stat)
{
	void *v;

	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(v < (void*)4096)	// errors come back as small positive values
		return nil;
	runtime·xadd64(stat, n);
	return v;
}

// SysUnused advises the kernel that the region is not in use, so its
// pages may be reclaimed without being written back.
void
runtime·SysUnused(void *v, uintptr n)
{
	runtime·madvise(v, n, MADV_FREE);
}

// SysUsed is the inverse of SysUnused. It is a no-op here: pages freed
// with MADV_FREE become live again as soon as they are touched.
void
runtime·SysUsed(void *v, uintptr n)
{
	USED(v);
	USED(n);
}

// SysFree returns memory to the operating system.
void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
	runtime·xadd64(stat, -(uint64)n);
	runtime·munmap(v, n);
}

// SysFault remaps the region with no access permissions, so that any
// use of it faults.
void
runtime·SysFault(void *v, uintptr n)
{
	runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
}

// SysReserve reserves address space without committing memory to it.
void*
runtime·SysReserve(void *v, uintptr n)
{
	void *p;

	// On 64-bit, people with ulimit -v set complain if we reserve too
	// much address space. Instead, assume that the reservation is okay
	// and check the assumption in SysMap.
	if(sizeof(void*) == 8)
		return v;

	p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(p < (void*)4096)
		return nil;
	return p;
}

// SysMap commits a region previously returned by SysReserve.
void
runtime·SysMap(void *v, uintptr n, uint64 *stat)
{
	void *p;

	runtime·xadd64(stat, n);

	// On 64-bit, we don't actually have v reserved, so tread carefully.
	if(sizeof(void*) == 8) {
		p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
		if(p == (void*)ENOMEM)
			runtime·throw("runtime: out of memory");
		if(p != v) {
			runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
			runtime·throw("runtime: address space conflict");
		}
		return;
	}

	// On 32-bit, the reservation made by SysReserve is real, so remap
	// it in place with read/write permissions.
	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
	if(p == (void*)ENOMEM)
		runtime·throw("runtime: out of memory");
	if(p != v)
		runtime·throw("runtime: cannot map pages in arena address space");
}
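For reference, the reserve-then-commit dance that SysReserve and SysMap perform on 32-bit can be sketched as a standalone program using plain POSIX mmap. This illustrates the pattern only, assuming a BSD-style MAP_ANON; it is not runtime code:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
	size_t n = 1<<20;
	void *v, *p;

	// Reserve: the range occupies address space but any access faults.
	v = mmap(0, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(v == MAP_FAILED)
		return 1;

	// Commit: remap the same range read/write. MAP_FIXED guarantees
	// the kernel either maps at v or fails outright.
	p = mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
	if(p == MAP_FAILED || p != v)
		return 1;

	memset(p, 0, n);	// the memory is now safe to touch
	printf("reserved and committed %zu bytes at %p\n", n, p);
	return 0;
}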