go/src/pkg/runtime/mem_windows.c
Keith Randall · 1665b006a5 · runtime: grow stack by copying
On stack overflow, if all frames on the stack are
copyable, we copy the frames to a new stack twice
as large as the old one.  During GC, if a G is using
less than 1/4 of its stack, copy the stack to a stack
half its size.

TODO
- Do something about C frames.  When a C frame is in the
  stack segment, it isn't copyable.  We allocate a new segment
  in this case.
  - For idempotent C code, we can abort it, copy the stack,
    then retry.  I'm working on a separate CL for this.
  - For other C code, we can raise the stackguard
    to the lowest Go frame so the next call that Go frame
    makes triggers a copy, which will then succeed.
- Pick a starting stack size?

The plan is that eventually we reach a point where the
stack contains only copyable frames.

LGTM=rsc
R=dvyukov, rsc
CC=golang-codereviews
https://golang.org/cl/54650044
2014-02-26 23:28:44 -08:00
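As a rough sketch of the sizing policy the commit message describes (standalone C for illustration only, not the runtime's actual stack code; the helper names below are made up, while the 2x growth factor and the 1/4 and 1/2 thresholds come from the message):

#include <stdint.h>

/* Illustrative policy only: grow by 2x when the stack overflows and all
   frames are copyable; at GC time, shrink a stack that is less than 1/4
   used to half its current size. */
static uintptr_t
grow_size_on_overflow(uintptr_t oldsize)
{
	return oldsize * 2;
}

static uintptr_t
shrink_size_at_gc(uintptr_t oldsize, uintptr_t used)
{
	if(used < oldsize/4)
		return oldsize/2;
	return oldsize;	/* otherwise keep the current size */
}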


// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "os_GOOS.h"
#include "defs_GOOS_GOARCH.h"
#include "malloc.h"
enum {
MEM_COMMIT = 0x1000,
MEM_RESERVE = 0x2000,
MEM_DECOMMIT = 0x4000,
MEM_RELEASE = 0x8000,
PAGE_READWRITE = 0x0004,
PAGE_NOACCESS = 0x0001,
};
#pragma dynimport runtime·VirtualAlloc VirtualAlloc "kernel32.dll"
#pragma dynimport runtime·VirtualFree VirtualFree "kernel32.dll"
#pragma dynimport runtime·VirtualProtect VirtualProtect "kernel32.dll"
extern void *runtime·VirtualAlloc;
extern void *runtime·VirtualFree;
extern void *runtime·VirtualProtect;
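
// SysAlloc obtains a new chunk of memory from the operating system by
// reserving and committing it in a single VirtualAlloc call, and charges
// it to the given memory statistic.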
void*
runtime·SysAlloc(uintptr n, uint64 *stat)
{
	runtime·xadd64(stat, n);
	return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, (uintptr)(MEM_COMMIT|MEM_RESERVE), (uintptr)PAGE_READWRITE);
}
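
// SysUnused tells the OS that the contents of this region are no longer
// needed: the pages are decommitted, but the address range stays reserved.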
void
runtime·SysUnused(void *v, uintptr n)
{
	void *r;

	r = runtime·stdcall(runtime·VirtualFree, 3, v, n, (uintptr)MEM_DECOMMIT);
	if(r == nil)
		runtime·throw("runtime: failed to decommit pages");
}
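
// SysUsed recommits pages previously decommitted by SysUnused so the
// region can be accessed again.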
void
runtime·SysUsed(void *v, uintptr n)
{
	void *r;

	r = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_COMMIT, (uintptr)PAGE_READWRITE);
	if(r != v)
		runtime·throw("runtime: failed to commit pages");
}
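
// SysFree returns the region to the OS unconditionally, releasing both
// the committed pages and the address space reservation.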
void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
	uintptr r;

	runtime·xadd64(stat, -(uint64)n);
	r = (uintptr)runtime·stdcall(runtime·VirtualFree, 3, v, (uintptr)0, (uintptr)MEM_RELEASE);
	if(r == 0)
		runtime·throw("runtime: failed to release pages");
}
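
// SysFault changes the region's protection to PAGE_NOACCESS so that any
// access to it faults.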
void
runtime·SysFault(void *v, uintptr n)
{
	uintptr r, old;

	r = (uintptr)runtime·stdcall(runtime·VirtualProtect, 4, v, n, (uintptr)PAGE_NOACCESS, &old);
	if(r == 0)
		runtime·throw("runtime: failed to protect pages");
}
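
// SysReserve reserves address space for later use by SysMap, without
// committing any memory.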
void*
runtime·SysReserve(void *v, uintptr n)
{
	// v is just a hint.
	// First try at v.
	v = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_RESERVE, (uintptr)PAGE_READWRITE);
	if(v != nil)
		return v;

	// Next let the kernel choose the address.
	return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, (uintptr)MEM_RESERVE, (uintptr)PAGE_READWRITE);
}
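
// SysMap commits a range previously reserved by SysReserve, making it
// usable as part of the heap arena.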
void
runtime·SysMap(void *v, uintptr n, uint64 *stat)
{
	void *p;

	runtime·xadd64(stat, n);
	p = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_COMMIT, (uintptr)PAGE_READWRITE);
	if(p != v)
		runtime·throw("runtime: cannot map pages in arena address space");
}
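
For readers unfamiliar with the underlying Win32 calls, here is a minimal self-contained example of the reserve/commit/decommit/release sequence that the functions above wrap (plain C against windows.h, illustrative only, with error checks omitted):

#include <windows.h>
#include <stdio.h>

int
main(void)
{
	SIZE_T size = 1<<20;	/* 1 MiB */
	char *v, *p;

	/* Reserve address space only (what SysReserve does). */
	v = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE);

	/* Commit the pages so they can be used (SysMap / SysUsed). */
	p = VirtualAlloc(v, size, MEM_COMMIT, PAGE_READWRITE);
	p[0] = 1;

	/* Give the physical pages back but keep the reservation (SysUnused). */
	VirtualFree(v, size, MEM_DECOMMIT);

	/* Release the whole region (SysFree); size must be 0 with MEM_RELEASE. */
	VirtualFree(v, 0, MEM_RELEASE);

	printf("done\n");
	return 0;
}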