[dev.cc] runtime: convert panic and stack code from C to Go

The conversion was done with an automated tool and then modified only
as necessary to make it compile and run.

[This CL is part of the removal of C code from package runtime.
See golang.org/s/dev.cc for an overview.]

LGTM=r
R=r, dave
CC=austin, dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/166520043

This commit is contained in:
parent 0d49f7b5fc
commit d98553a727
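Throughout this diff, converted Go code hands work to M-stack helpers using the dev.cc transition idiom: arguments are stored into the m.scalararg/m.ptrarg slots, onM switches to the scheduler (g0) stack, and an argumentless *_m helper such as deferproc_m reads the slots back out. A minimal standalone sketch of that handoff follows; the package-level slot array, the onM stub, and the helper name are illustrative stand-ins, not the runtime's own definitions.

package main

import "fmt"

// scalararg models the per-M argument slots (m.scalararg in the runtime).
var scalararg [4]uintptr

// onM stands in for the runtime's onM: run fn on the g0 stack.
// A direct call is enough to show the calling convention.
func onM(fn func()) { fn() }

// double_m plays the role of a helper like deferproc_m: it takes no
// parameters, so inputs and results travel through the slots instead.
func double_m() {
	scalararg[0] *= 2
}

func main() {
	scalararg[0] = 21         // caller stores arguments in the slots...
	onM(double_m)             // ...switches stacks and runs the helper...
	fmt.Println(scalararg[0]) // ...and reads the result back: 42
}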
src/runtime/panic.c (deleted)
@@ -1,200 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "stack.h"
#include "malloc.h"
#include "textflag.h"

// Code related to defer, panic and recover.

// TODO: remove once code is moved to Go
extern Defer* runtime·newdefer(int32 siz);
extern void runtime·freedefer(Defer *d);

uint32 runtime·panicking;
static Mutex paniclk;

void
runtime·deferproc_m(void)
{
	int32 siz;
	FuncVal *fn;
	uintptr argp;
	uintptr callerpc;
	Defer *d;

	siz = g->m->scalararg[0];
	fn = g->m->ptrarg[0];
	argp = g->m->scalararg[1];
	callerpc = g->m->scalararg[2];
	g->m->ptrarg[0] = nil;
	g->m->scalararg[1] = 0;

	d = runtime·newdefer(siz);
	if(d->panic != nil)
		runtime·throw("deferproc: d->panic != nil after newdefer");
	d->fn = fn;
	d->pc = callerpc;
	d->argp = argp;
	runtime·memmove(d+1, (void*)argp, siz);
}

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
void
runtime·recovery_m(G *gp)
{
	void *argp;
	uintptr pc;

	// Info about defer passed in G struct.
	argp = (void*)gp->sigcode0;
	pc = (uintptr)gp->sigcode1;

	// d's arguments need to be in the stack.
	if(argp != nil && ((uintptr)argp < gp->stack.lo || gp->stack.hi < (uintptr)argp)) {
		runtime·printf("recover: %p not in [%p, %p]\n", argp, gp->stack.lo, gp->stack.hi);
		runtime·throw("bad recovery");
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	// The -2*sizeof(uintptr) makes up for the
	// two extra words that are on the stack at
	// each call to deferproc.
	// (The pc we're returning to does pop pop
	// before it tests the return value.)
	// On the arm there are 2 saved LRs mixed in too.
	if(thechar == '5')
		gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr);
	else
		gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr);
	gp->sched.pc = pc;
	gp->sched.lr = 0;
	gp->sched.ret = 1;
	runtime·gogo(&gp->sched);
}

void
runtime·startpanic_m(void)
{
	if(runtime·mheap.cachealloc.size == 0) { // very early
		runtime·printf("runtime: panic before malloc heap initialized\n");
		g->m->mallocing = 1; // tell rest of panic not to try to malloc
	} else if(g->m->mcache == nil) // can happen if called from signal handler or throw
		g->m->mcache = runtime·allocmcache();
	switch(g->m->dying) {
	case 0:
		g->m->dying = 1;
		if(g != nil) {
			g->writebuf.array = nil;
			g->writebuf.len = 0;
			g->writebuf.cap = 0;
		}
		runtime·xadd(&runtime·panicking, 1);
		runtime·lock(&paniclk);
		if(runtime·debug.schedtrace > 0 || runtime·debug.scheddetail > 0)
			runtime·schedtrace(true);
		runtime·freezetheworld();
		return;
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic(). Just print a stack trace and exit.
		g->m->dying = 2;
		runtime·printf("panic during panic\n");
		runtime·dopanic(0);
		runtime·exit(3);
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		g->m->dying = 3;
		runtime·printf("stack trace unavailable\n");
		runtime·exit(4);
	default:
		// Can't even print! Just exit.
		runtime·exit(5);
	}
}

void
runtime·dopanic_m(void)
{
	G *gp;
	uintptr sp, pc;
	static bool didothers;
	bool crash;
	int32 t;

	gp = g->m->ptrarg[0];
	g->m->ptrarg[0] = nil;
	pc = g->m->scalararg[0];
	sp = g->m->scalararg[1];
	g->m->scalararg[1] = 0;
	if(gp->sig != 0)
		runtime·printf("[signal %x code=%p addr=%p pc=%p]\n",
			gp->sig, gp->sigcode0, gp->sigcode1, gp->sigpc);

	if((t = runtime·gotraceback(&crash)) > 0){
		if(gp != gp->m->g0) {
			runtime·printf("\n");
			runtime·goroutineheader(gp);
			runtime·traceback(pc, sp, 0, gp);
		} else if(t >= 2 || g->m->throwing > 0) {
			runtime·printf("\nruntime stack:\n");
			runtime·traceback(pc, sp, 0, gp);
		}
		if(!didothers) {
			didothers = true;
			runtime·tracebackothers(gp);
		}
	}
	runtime·unlock(&paniclk);
	if(runtime·xadd(&runtime·panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Mutex deadlock;
		runtime·lock(&deadlock);
		runtime·lock(&deadlock);
	}

	if(crash)
		runtime·crash();

	runtime·exit(2);
}

#pragma textflag NOSPLIT
bool
runtime·canpanic(G *gp)
{
	M *m;
	uint32 status;

	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	m = g->m;

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if(gp == nil || gp != m->curg)
		return false;
	if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
		return false;
	status = runtime·readgstatus(gp);
	if((status&~Gscan) != Grunning || gp->syscallsp != 0)
		return false;
#ifdef GOOS_windows
	if(m->libcallsp != 0)
		return false;
#endif
	return true;
}
src/runtime/panic.go
@@ -54,6 +54,11 @@ func throwinit() {
 // The compiler turns a defer statement into a call to this.
 //go:nosplit
 func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
+	if getg().m.curg != getg() {
+		// go code on the m stack can't defer
+		gothrow("defer on m")
+	}
+
 	// the arguments of fn are in a perilous state. The stack map
 	// for deferproc does not describe them. So we can't let garbage
 	// collection or stack copying trigger until we've copied them out
@@ -64,20 +69,18 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
 	if GOARCH == "arm" {
 		argp += ptrSize // skip caller's saved link register
 	}
-	mp := acquirem()
-	mp.scalararg[0] = uintptr(siz)
-	mp.ptrarg[0] = unsafe.Pointer(fn)
-	mp.scalararg[1] = argp
-	mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz))
+	callerpc := getcallerpc(unsafe.Pointer(&siz))
 
-	if mp.curg != getg() {
-		// go code on the m stack can't defer
-		gothrow("defer on m")
+	onM(func() {
+		d := newdefer(siz)
+		if d._panic != nil {
+			gothrow("deferproc: d.panic != nil after newdefer")
 	}
-
-	onM(deferproc_m)
-
-	releasem(mp)
+		d.fn = fn
+		d.pc = callerpc
+		d.argp = argp
+		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
+	})
 
 	// deferproc returns 0 normally.
 	// a deferred func that stops a panic
@@ -298,8 +301,6 @@ func Goexit() {
 	goexit()
 }
 
-func canpanic(*g) bool
-
 // Print all currently active panics. Used when crashing.
 func printpanics(p *_panic) {
 	if p.link != nil {
@@ -318,6 +319,9 @@ func printpanics(p *_panic) {
 func gopanic(e interface{}) {
 	gp := getg()
 	if gp.m.curg != gp {
+		print("panic: ")
+		printany(e)
+		print("\n")
 		gothrow("panic on m stack")
 	}
 
@@ -414,7 +418,7 @@ func gopanic(e interface{}) {
 		// Pass information about recovering frame to recovery.
 		gp.sigcode0 = uintptr(argp)
 		gp.sigcode1 = pc
-		mcall(recovery_m)
+		mcall(recovery)
 		gothrow("recovery failed") // mcall should not return
 	}
 }
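The deferproc hunk above ends with memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz)): the deferred call's argument bytes are copied to just past the _defer header, so a single record holds both, exactly like memmove(d+1, argp, siz) in the deleted C. A standalone sketch of that header-plus-payload layout, with made-up types (the real _defer has more fields, and the runtime manages alignment and GC visibility itself):

package main

import (
	"fmt"
	"unsafe"
)

// header is a stand-in for the _defer struct; the payload bytes live
// immediately after it in the same allocation.
type header struct {
	siz int32
}

func newRecord(args []byte) *header {
	// One allocation for header plus argument bytes. Assumes the byte
	// slice is suitably aligned for header, which holds in practice.
	buf := make([]byte, int(unsafe.Sizeof(header{}))+len(args))
	h := (*header)(unsafe.Pointer(&buf[0]))
	h.siz = int32(len(args))
	copy(buf[unsafe.Sizeof(header{}):], args) // the memmove(d+1, ...) step
	return h
}

func payload(h *header) []byte {
	p := unsafe.Add(unsafe.Pointer(h), unsafe.Sizeof(header{}))
	return unsafe.Slice((*byte)(p), h.siz)
}

func main() {
	h := newRecord([]byte{1, 2, 3, 4})
	fmt.Println(h.siz, payload(h)) // 4 [1 2 3 4]
}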
src/runtime/panic1.go (new file, 168 lines)
@@ -0,0 +1,168 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Code related to defer, panic and recover.
// TODO: Merge into panic.go.

//uint32 runtime·panicking;
var paniclk mutex

const hasLinkRegister = thechar == '5'

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	argp := (unsafe.Pointer)(gp.sigcode0)
	pc := uintptr(gp.sigcode1)

	// d's arguments need to be in the stack.
	if argp != nil && (uintptr(argp) < gp.stack.lo || gp.stack.hi < uintptr(argp)) {
		print("recover: ", argp, " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		gothrow("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	// The -2*sizeof(uintptr) makes up for the
	// two extra words that are on the stack at
	// each call to deferproc.
	// (The pc we're returning to does pop pop
	// before it tests the return value.)
	// On the arm there are 2 saved LRs mixed in too.
	if hasLinkRegister {
		gp.sched.sp = uintptr(argp) - 4*ptrSize
	} else {
		gp.sched.sp = uintptr(argp) - 2*ptrSize
	}
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		if _g_ != nil {
			_g_.writebuf = nil
		}
		xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic(). Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
	}
}

var didothers bool
var deadlock mutex

func dopanic_m() {
	_g_ := getg()

	gp := (*g)(_g_.m.ptrarg[0])
	_g_.m.ptrarg[0] = nil
	pc := uintptr(_g_.m.scalararg[0])
	sp := uintptr(_g_.m.scalararg[1])
	_g_.m.scalararg[1] = 0

	if gp.sig != 0 {
		print("[signal ", hex(gp.sig), " code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	var docrash bool
	if t := gotraceback(&docrash); t > 0 {
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if t >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}

//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.gcing != 0 || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}
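One idiom in dopanic_m above is worth calling out: a losing M parks itself with two consecutive lock(&deadlock) calls. The second acquisition of a held, non-recursive mutex can never succeed, so the M blocks forever without spinning while the winning M prints and exits. The same trick in plain Go with sync.Mutex (an assumption for illustration: the runtime uses its internal mutex, not the sync package):

package main

import (
	"fmt"
	"sync"
	"time"
)

// parkForever blocks its caller permanently: the second Lock waits on
// a mutex the same goroutine already holds.
func parkForever() {
	var deadlock sync.Mutex
	deadlock.Lock()
	deadlock.Lock() // never returns
}

func main() {
	go func() {
		fmt.Println("parking")
		parkForever()
		fmt.Println("unreachable")
	}()
	time.Sleep(100 * time.Millisecond) // let it park, then exit normally
}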
src/runtime/stack.c (deleted)
@@ -1,897 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "stack.h"
#include "funcdata.h"
#include "typekind.h"
#include "type.h"
#include "race.h"
#include "mgc0.h"
#include "textflag.h"

enum
{
	// StackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	StackDebug = 0,
	StackFromSystem = 0,	// allocate stacks from system memory instead of the heap
	StackFaultOnFree = 0,	// old stacks are mapped noaccess to detect use after free
	StackPoisonCopy = 0,	// fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	StackCache = 1,
};

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
MSpan runtime·stackpool[NumStackOrders];
Mutex runtime·stackpoolmu;
// TODO: one lock per order?

static Stack stackfreequeue;

void
runtime·stackinit(void)
{
	int32 i;

	if((StackCacheSize & PageMask) != 0)
		runtime·throw("cache size must be a multiple of page size");

	for(i = 0; i < NumStackOrders; i++)
		runtime·MSpanList_Init(&runtime·stackpool[i]);
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
static MLink*
poolalloc(uint8 order)
{
	MSpan *list;
	MSpan *s;
	MLink *x;
	uintptr i;

	list = &runtime·stackpool[order];
	s = list->next;
	if(s == list) {
		// no free stacks. Allocate another span worth.
		s = runtime·MHeap_AllocStack(&runtime·mheap, StackCacheSize >> PageShift);
		if(s == nil)
			runtime·throw("out of memory");
		if(s->ref != 0)
			runtime·throw("bad ref");
		if(s->freelist != nil)
			runtime·throw("bad freelist");
		for(i = 0; i < StackCacheSize; i += FixedStack << order) {
			x = (MLink*)((s->start << PageShift) + i);
			x->next = s->freelist;
			s->freelist = x;
		}
		runtime·MSpanList_Insert(list, s);
	}
	x = s->freelist;
	if(x == nil)
		runtime·throw("span has no free stacks");
	s->freelist = x->next;
	s->ref++;
	if(s->freelist == nil) {
		// all stacks in s are allocated.
		runtime·MSpanList_Remove(s);
	}
	return x;
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
static void
poolfree(MLink *x, uint8 order)
{
	MSpan *s;

	s = runtime·MHeap_Lookup(&runtime·mheap, x);
	if(s->state != MSpanStack)
		runtime·throw("freeing stack not in a stack span");
	if(s->freelist == nil) {
		// s will now have a free stack
		runtime·MSpanList_Insert(&runtime·stackpool[order], s);
	}
	x->next = s->freelist;
	s->freelist = x;
	s->ref--;
	if(s->ref == 0) {
		// span is completely free - return to heap
		runtime·MSpanList_Remove(s);
		s->freelist = nil;
		runtime·MHeap_FreeStack(&runtime·mheap, s);
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
static void
stackcacherefill(MCache *c, uint8 order)
{
	MLink *x, *list;
	uintptr size;

	if(StackDebug >= 1)
		runtime·printf("stackcacherefill order=%d\n", order);

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	list = nil;
	size = 0;
	runtime·lock(&runtime·stackpoolmu);
	while(size < StackCacheSize/2) {
		x = poolalloc(order);
		x->next = list;
		list = x;
		size += FixedStack << order;
	}
	runtime·unlock(&runtime·stackpoolmu);

	c->stackcache[order].list = list;
	c->stackcache[order].size = size;
}

static void
stackcacherelease(MCache *c, uint8 order)
{
	MLink *x, *y;
	uintptr size;

	if(StackDebug >= 1)
		runtime·printf("stackcacherelease order=%d\n", order);
	x = c->stackcache[order].list;
	size = c->stackcache[order].size;
	runtime·lock(&runtime·stackpoolmu);
	while(size > StackCacheSize/2) {
		y = x->next;
		poolfree(x, order);
		x = y;
		size -= FixedStack << order;
	}
	runtime·unlock(&runtime·stackpoolmu);
	c->stackcache[order].list = x;
	c->stackcache[order].size = size;
}

void
runtime·stackcache_clear(MCache *c)
{
	uint8 order;
	MLink *x, *y;

	if(StackDebug >= 1)
		runtime·printf("stackcache clear\n");
	runtime·lock(&runtime·stackpoolmu);
	for(order = 0; order < NumStackOrders; order++) {
		x = c->stackcache[order].list;
		while(x != nil) {
			y = x->next;
			poolfree(x, order);
			x = y;
		}
		c->stackcache[order].list = nil;
		c->stackcache[order].size = 0;
	}
	runtime·unlock(&runtime·stackpoolmu);
}

Stack
runtime·stackalloc(uint32 n)
{
	uint8 order;
	uint32 n2;
	void *v;
	MLink *x;
	MSpan *s;
	MCache *c;

	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	if(g != g->m->g0)
		runtime·throw("stackalloc not on scheduler stack");
	if((n & (n-1)) != 0)
		runtime·throw("stack size not a power of 2");
	if(StackDebug >= 1)
		runtime·printf("stackalloc %d\n", n);

	if(runtime·debug.efence || StackFromSystem) {
		v = runtime·sysAlloc(ROUND(n, PageSize), &mstats.stacks_sys);
		if(v == nil)
			runtime·throw("out of memory (stackalloc)");
		return (Stack){(uintptr)v, (uintptr)v+n};
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	if(StackCache && n < FixedStack << NumStackOrders && n < StackCacheSize) {
		order = 0;
		n2 = n;
		while(n2 > FixedStack) {
			order++;
			n2 >>= 1;
		}
		c = g->m->mcache;
		if(c == nil || g->m->gcing || g->m->helpgc) {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			runtime·lock(&runtime·stackpoolmu);
			x = poolalloc(order);
			runtime·unlock(&runtime·stackpoolmu);
		} else {
			x = c->stackcache[order].list;
			if(x == nil) {
				stackcacherefill(c, order);
				x = c->stackcache[order].list;
			}
			c->stackcache[order].list = x->next;
			c->stackcache[order].size -= n;
		}
		v = (byte*)x;
	} else {
		s = runtime·MHeap_AllocStack(&runtime·mheap, ROUND(n, PageSize) >> PageShift);
		if(s == nil)
			runtime·throw("out of memory");
		v = (byte*)(s->start<<PageShift);
	}

	if(raceenabled)
		runtime·racemalloc(v, n);
	if(StackDebug >= 1)
		runtime·printf(" allocated %p\n", v);
	return (Stack){(uintptr)v, (uintptr)v+n};
}

void
runtime·stackfree(Stack stk)
{
	uint8 order;
	uintptr n, n2;
	MSpan *s;
	MLink *x;
	MCache *c;
	void *v;

	n = stk.hi - stk.lo;
	v = (void*)stk.lo;
	if(n & (n-1))
		runtime·throw("stack not a power of 2");
	if(StackDebug >= 1) {
		runtime·printf("stackfree %p %d\n", v, (int32)n);
		runtime·memclr(v, n); // for testing, clobber stack data
	}
	if(runtime·debug.efence || StackFromSystem) {
		if(runtime·debug.efence || StackFaultOnFree)
			runtime·SysFault(v, n);
		else
			runtime·SysFree(v, n, &mstats.stacks_sys);
		return;
	}
	if(StackCache && n < FixedStack << NumStackOrders && n < StackCacheSize) {
		order = 0;
		n2 = n;
		while(n2 > FixedStack) {
			order++;
			n2 >>= 1;
		}
		x = (MLink*)v;
		c = g->m->mcache;
		if(c == nil || g->m->gcing || g->m->helpgc) {
			runtime·lock(&runtime·stackpoolmu);
			poolfree(x, order);
			runtime·unlock(&runtime·stackpoolmu);
		} else {
			if(c->stackcache[order].size >= StackCacheSize)
				stackcacherelease(c, order);
			x->next = c->stackcache[order].list;
			c->stackcache[order].list = x;
			c->stackcache[order].size += n;
		}
	} else {
		s = runtime·MHeap_Lookup(&runtime·mheap, v);
		if(s->state != MSpanStack) {
			runtime·printf("%p %p\n", s->start<<PageShift, v);
			runtime·throw("bad span state");
		}
		runtime·MHeap_FreeStack(&runtime·mheap, s);
	}
}

uintptr runtime·maxstacksize = 1<<20; // enough until runtime.main sets it for real

static uint8*
mapnames[] = {
	(uint8*)"---",
	(uint8*)"scalar",
	(uint8*)"ptr",
	(uint8*)"multi",
};

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

void runtime·main(void);
void runtime·switchtoM(void(*)(void));

typedef struct AdjustInfo AdjustInfo;
struct AdjustInfo {
	Stack old;
	uintptr delta;	// ptr distance from old to new stack (newbase - oldbase)
};

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
static void
adjustpointer(AdjustInfo *adjinfo, void *vpp)
{
	byte **pp, *p;

	pp = vpp;
	p = *pp;
	if(StackDebug >= 4)
		runtime·printf("        %p:%p\n", pp, p);
	if(adjinfo->old.lo <= (uintptr)p && (uintptr)p < adjinfo->old.hi) {
		*pp = p + adjinfo->delta;
		if(StackDebug >= 3)
			runtime·printf("        adjust ptr %p: %p -> %p\n", pp, p, *pp);
	}
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
static void
adjustpointers(byte **scanp, BitVector *bv, AdjustInfo *adjinfo, Func *f)
{
	uintptr delta;
	int32 num, i;
	byte *p, *minp, *maxp;
	Type *t;
	Itab *tab;

	minp = (byte*)adjinfo->old.lo;
	maxp = (byte*)adjinfo->old.hi;
	delta = adjinfo->delta;
	num = bv->n / BitsPerPointer;
	for(i = 0; i < num; i++) {
		if(StackDebug >= 4)
			runtime·printf("        %p:%s:%p\n", &scanp[i], mapnames[bv->bytedata[i / (8 / BitsPerPointer)] >> (i * BitsPerPointer & 7) & 3], scanp[i]);
		switch(bv->bytedata[i / (8 / BitsPerPointer)] >> (i * BitsPerPointer & 7) & 3) {
		case BitsDead:
			if(runtime·debug.gcdead)
				scanp[i] = (byte*)PoisonStack;
			break;
		case BitsScalar:
			break;
		case BitsPointer:
			p = scanp[i];
			if(f != nil && (byte*)0 < p && (p < (byte*)PageSize && runtime·invalidptr || (uintptr)p == PoisonGC || (uintptr)p == PoisonStack)) {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				g->m->traceback = 2;
				runtime·printf("runtime: bad pointer in frame %s at %p: %p\n", runtime·funcname(f), &scanp[i], p);
				runtime·throw("invalid stack pointer");
			}
			if(minp <= p && p < maxp) {
				if(StackDebug >= 3)
					runtime·printf("adjust ptr %p %s\n", p, runtime·funcname(f));
				scanp[i] = p + delta;
			}
			break;
		case BitsMultiWord:
			switch(bv->bytedata[(i+1) / (8 / BitsPerPointer)] >> ((i+1) * BitsPerPointer & 7) & 3) {
			default:
				runtime·throw("unexpected garbage collection bits");
			case BitsEface:
				t = (Type*)scanp[i];
				if(t != nil && ((t->kind & KindDirectIface) == 0 || (t->kind & KindNoPointers) == 0)) {
					p = scanp[i+1];
					if(minp <= p && p < maxp) {
						if(StackDebug >= 3)
							runtime·printf("adjust eface %p\n", p);
						if(t->size > PtrSize) // currently we always allocate such objects on the heap
							runtime·throw("large interface value found on stack");
						scanp[i+1] = p + delta;
					}
				}
				i++;
				break;
			case BitsIface:
				tab = (Itab*)scanp[i];
				if(tab != nil) {
					t = tab->type;
					//runtime·printf("      type=%p\n", t);
					if((t->kind & KindDirectIface) == 0 || (t->kind & KindNoPointers) == 0) {
						p = scanp[i+1];
						if(minp <= p && p < maxp) {
							if(StackDebug >= 3)
								runtime·printf("adjust iface %p\n", p);
							if(t->size > PtrSize) // currently we always allocate such objects on the heap
								runtime·throw("large interface value found on stack");
							scanp[i+1] = p + delta;
						}
					}
				}
				i++;
				break;
			}
			break;
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
static bool
adjustframe(Stkframe *frame, void *arg)
{
	AdjustInfo *adjinfo;
	Func *f;
	StackMap *stackmap;
	int32 pcdata;
	BitVector bv;
	uintptr targetpc, size, minsize;

	adjinfo = arg;
	targetpc = frame->continpc;
	if(targetpc == 0) {
		// Frame is dead.
		return true;
	}
	f = frame->fn;
	if(StackDebug >= 2)
		runtime·printf("    adjusting %s frame=[%p,%p] pc=%p continpc=%p\n", runtime·funcname(f), frame->sp, frame->fp, frame->pc, frame->continpc);
	if(f->entry == (uintptr)runtime·switchtoM) {
		// A special routine at the bottom of stack of a goroutine that does an onM call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true;
	}
	if(targetpc != f->entry)
		targetpc--;
	pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
	if(pcdata == -1)
		pcdata = 0; // in prologue

	// Adjust local variables if stack frame has been allocated.
	size = frame->varp - frame->sp;
	if(thechar != '6' && thechar != '8')
		minsize = sizeof(uintptr);
	else
		minsize = 0;
	if(size > minsize) {
		stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
		if(stackmap == nil || stackmap->n <= 0) {
			runtime·printf("runtime: frame %s untyped locals %p+%p\n", runtime·funcname(f), (byte*)(frame->varp-size), size);
			runtime·throw("missing stackmap");
		}
		// Locals bitmap information, scan just the pointers in locals.
		if(pcdata < 0 || pcdata >= stackmap->n) {
			// don't know where we are
			runtime·printf("runtime: pcdata is %d and %d locals stack map entries for %s (targetpc=%p)\n",
				pcdata, stackmap->n, runtime·funcname(f), targetpc);
			runtime·throw("bad symbol table");
		}
		bv = runtime·stackmapdata(stackmap, pcdata);
		size = (bv.n * PtrSize) / BitsPerPointer;
		if(StackDebug >= 3)
			runtime·printf("      locals\n");
		adjustpointers((byte**)(frame->varp - size), &bv, adjinfo, f);
	}

	// Adjust arguments.
	if(frame->arglen > 0) {
		if(frame->argmap != nil) {
			bv = *frame->argmap;
		} else {
			stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
			if(stackmap == nil || stackmap->n <= 0) {
				runtime·printf("runtime: frame %s untyped args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen);
				runtime·throw("missing stackmap");
			}
			if(pcdata < 0 || pcdata >= stackmap->n) {
				// don't know where we are
				runtime·printf("runtime: pcdata is %d and %d args stack map entries for %s (targetpc=%p)\n",
					pcdata, stackmap->n, runtime·funcname(f), targetpc);
				runtime·throw("bad symbol table");
			}
			bv = runtime·stackmapdata(stackmap, pcdata);
		}
		if(StackDebug >= 3)
			runtime·printf("      args\n");
		adjustpointers((byte**)frame->argp, &bv, adjinfo, nil);
	}

	return true;
}

static void
adjustctxt(G *gp, AdjustInfo *adjinfo)
{
	adjustpointer(adjinfo, &gp->sched.ctxt);
}

static void
adjustdefers(G *gp, AdjustInfo *adjinfo)
{
	Defer *d;
	bool (*cb)(Stkframe*, void*);

	// Adjust defer argument blocks the same way we adjust active stack frames.
	cb = adjustframe;
	runtime·tracebackdefers(gp, &cb, adjinfo);

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for(d = gp->defer; d != nil; d = d->link) {
		adjustpointer(adjinfo, &d->fn);
		adjustpointer(adjinfo, &d->argp);
		adjustpointer(adjinfo, &d->panic);
	}
}

static void
adjustpanics(G *gp, AdjustInfo *adjinfo)
{
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, &gp->panic);
}

static void
adjustsudogs(G *gp, AdjustInfo *adjinfo)
{
	SudoG *s;

	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for(s = gp->waiting; s != nil; s = s->waitlink) {
		adjustpointer(adjinfo, &s->elem);
		adjustpointer(adjinfo, &s->selectdone);
	}
}

// Copies gp's stack to a new stack of a different size.
static void
copystack(G *gp, uintptr newsize)
{
	Stack old, new;
	uintptr used;
	AdjustInfo adjinfo;
	uint32 oldstatus;
	bool (*cb)(Stkframe*, void*);
	byte *p, *ep;

	if(gp->syscallsp != 0)
		runtime·throw("stack growth not allowed in system call");
	old = gp->stack;
	if(old.lo == 0)
		runtime·throw("nil stackbase");
	used = old.hi - gp->sched.sp;

	// allocate new stack
	new = runtime·stackalloc(newsize);
	if(StackPoisonCopy) {
		p = (byte*)new.lo;
		ep = (byte*)new.hi;
		while(p < ep)
			*p++ = 0xfd;
	}

	if(StackDebug >= 1)
		runtime·printf("copystack gp=%p [%p %p %p]/%d -> [%p %p %p]/%d\n", gp, old.lo, old.hi-used, old.hi, (int32)(old.hi-old.lo), new.lo, new.hi-used, new.hi, (int32)newsize);

	// adjust pointers in the to-be-copied frames
	adjinfo.old = old;
	adjinfo.delta = new.hi - old.hi;
	cb = adjustframe;
	runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &cb, &adjinfo, 0);

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo);
	adjustdefers(gp, &adjinfo);
	adjustpanics(gp, &adjinfo);
	adjustsudogs(gp, &adjinfo);

	// copy the stack to the new location
	if(StackPoisonCopy) {
		p = (byte*)new.lo;
		ep = (byte*)new.hi;
		while(p < ep)
			*p++ = 0xfb;
	}
	runtime·memmove((byte*)new.hi - used, (byte*)old.hi - used, used);

	oldstatus = runtime·readgstatus(gp);
	oldstatus &= ~Gscan;
	if(oldstatus == Gwaiting || oldstatus == Grunnable)
		runtime·casgstatus(gp, oldstatus, Gcopystack); // oldstatus is Gwaiting or Grunnable
	else
		runtime·throw("copystack: bad status, not Gwaiting or Grunnable");

	// Swap out old stack for new one
	gp->stack = new;
	gp->stackguard0 = new.lo + StackGuard; // NOTE: might clobber a preempt request
	gp->sched.sp = new.hi - used;

	runtime·casgstatus(gp, Gcopystack, oldstatus); // oldstatus is Gwaiting or Grunnable

	// free old stack
	if(StackPoisonCopy) {
		p = (byte*)old.lo;
		ep = (byte*)old.hi;
		while(p < ep)
			*p++ = 0xfc;
	}
	if(newsize > old.hi-old.lo) {
		// growing, free stack immediately
		runtime·stackfree(old);
	} else {
		// shrinking, queue up free operation. We can't actually free the stack
		// just yet because we might run into the following situation:
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is shrunk
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
		//    the pointer looks like a pointer into a free span.
		// By not freeing, we prevent step #4 until GC is done.
		runtime·lock(&runtime·stackpoolmu);
		*(Stack*)old.lo = stackfreequeue;
		stackfreequeue = old;
		runtime·unlock(&runtime·stackpoolmu);
	}
}

// round x up to a power of 2.
int32
runtime·round2(int32 x)
{
	int32 s;

	s = 0;
	while((1 << s) < x)
		s++;
	return 1 << s;
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
void
runtime·newstack(void)
{
	int32 oldsize, newsize;
	uintptr sp;
	G *gp;
	Gobuf morebuf;

	if(g->m->morebuf.g->stackguard0 == (uintptr)StackFork)
		runtime·throw("stack growth after fork");
	if(g->m->morebuf.g != g->m->curg) {
		runtime·printf("runtime: newstack called from g=%p\n"
			"\tm=%p m->curg=%p m->g0=%p m->gsignal=%p\n",
			g->m->morebuf.g, g->m, g->m->curg, g->m->g0, g->m->gsignal);
		morebuf = g->m->morebuf;
		runtime·traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g);
		runtime·throw("runtime: wrong goroutine in newstack");
	}
	if(g->m->curg->throwsplit)
		runtime·throw("runtime: stack split at bad time");

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning or Gscanrunning.

	gp = g->m->curg;
	morebuf = g->m->morebuf;
	g->m->morebuf.pc = (uintptr)nil;
	g->m->morebuf.lr = (uintptr)nil;
	g->m->morebuf.sp = (uintptr)nil;
	g->m->morebuf.g = (G*)nil;

	runtime·casgstatus(gp, Grunning, Gwaiting);
	gp->waitreason = runtime·gostringnocopy((byte*)"stack growth");

	runtime·rewindmorestack(&gp->sched);

	if(gp->stack.lo == 0)
		runtime·throw("missing stack in newstack");
	sp = gp->sched.sp;
	if(thechar == '6' || thechar == '8') {
		// The call to morestack cost a word.
		sp -= sizeof(uintreg);
	}
	if(StackDebug >= 1 || sp < gp->stack.lo) {
		runtime·printf("runtime: newstack sp=%p stack=[%p, %p]\n"
			"\tmorebuf={pc:%p sp:%p lr:%p}\n"
			"\tsched={pc:%p sp:%p lr:%p ctxt:%p}\n",
			sp, gp->stack.lo, gp->stack.hi,
			g->m->morebuf.pc, g->m->morebuf.sp, g->m->morebuf.lr,
			gp->sched.pc, gp->sched.sp, gp->sched.lr, gp->sched.ctxt);
	}
	if(sp < gp->stack.lo) {
		runtime·printf("runtime: gp=%p, gp->status=%d\n ", (void*)gp, runtime·readgstatus(gp));
		runtime·printf("runtime: split stack overflow: %p < %p\n", sp, gp->stack.lo);
		runtime·throw("runtime: split stack overflow");
	}

	if(gp->stackguard0 == (uintptr)StackPreempt) {
		if(gp == g->m->g0)
			runtime·throw("runtime: preempt g0");
		if(g->m->p == nil && g->m->locks == 0)
			runtime·throw("runtime: g is running but p is not");
		if(gp->preemptscan) {
			runtime·gcphasework(gp);
			runtime·casgstatus(gp, Gwaiting, Grunning);
			gp->stackguard0 = gp->stack.lo + StackGuard;
			gp->preempt = false;
			gp->preemptscan = false; // Tells the GC preemption was successful.
			runtime·gogo(&gp->sched); // never return
		}

		// Be conservative about where we preempt.
		// We are interested in preempting user Go code, not runtime code.
		if(g->m->locks || g->m->mallocing || g->m->gcing || g->m->p->status != Prunning) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp->stackguard0 = gp->stack.lo + StackGuard;
			runtime·casgstatus(gp, Gwaiting, Grunning);
			runtime·gogo(&gp->sched); // never return
		}
		// Act like goroutine called runtime.Gosched.
		runtime·casgstatus(gp, Gwaiting, Grunning);
		runtime·gosched_m(gp); // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize = gp->stack.hi - gp->stack.lo;
	newsize = oldsize * 2;
	if(newsize > runtime·maxstacksize) {
		runtime·printf("runtime: goroutine stack exceeds %D-byte limit\n", (uint64)runtime·maxstacksize);
		runtime·throw("stack overflow");
	}

	// Note that the concurrent GC might be scanning the stack as we try to replace it.
	// copystack takes care of the appropriate coordination with the stack scanner.
	copystack(gp, newsize);
	if(StackDebug >= 1)
		runtime·printf("stack grow done\n");
	runtime·casgstatus(gp, Gwaiting, Grunning);
	runtime·gogo(&gp->sched);
}

#pragma textflag NOSPLIT
void
runtime·nilfunc(void)
{
	*(byte*)0 = 0;
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
void
runtime·gostartcallfn(Gobuf *gobuf, FuncVal *fv)
{
	void *fn;

	if(fv != nil)
		fn = fv->fn;
	else
		fn = runtime·nilfunc;
	runtime·gostartcall(gobuf, fn, fv);
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
void
runtime·shrinkstack(G *gp)
{
	uintptr used, oldsize, newsize;

	if(runtime·readgstatus(gp) == Gdead) {
		if(gp->stack.lo != 0) {
			// Free whole stack - it will get reallocated
			// if G is used again.
			runtime·stackfree(gp->stack);
			gp->stack.lo = 0;
			gp->stack.hi = 0;
		}
		return;
	}
	if(gp->stack.lo == 0)
		runtime·throw("missing stack in shrinkstack");

	oldsize = gp->stack.hi - gp->stack.lo;
	newsize = oldsize / 2;
	if(newsize < FixedStack)
		return; // don't shrink below the minimum-sized stack
	used = gp->stack.hi - gp->sched.sp;
	if(used >= oldsize / 4)
		return; // still using at least 1/4 of the segment.

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if(gp->syscallsp != 0)
		return;

#ifdef GOOS_windows
	if(gp->m != nil && gp->m->libcallsp != 0)
		return;
#endif
	if(StackDebug > 0)
		runtime·printf("shrinking stack %D->%D\n", (uint64)oldsize, (uint64)newsize);
	copystack(gp, newsize);
}

// Do any delayed stack freeing that was queued up during GC.
void
runtime·shrinkfinish(void)
{
	Stack s, t;

	runtime·lock(&runtime·stackpoolmu);
	s = stackfreequeue;
	stackfreequeue = (Stack){0,0};
	runtime·unlock(&runtime·stackpoolmu);
	while(s.lo != 0) {
		t = *(Stack*)s.lo;
		runtime·stackfree(s);
		s = t;
	}
}

static void badc(void);

#pragma textflag NOSPLIT
void
runtime·morestackc(void)
{
	void (*fn)(void);

	fn = badc;
	runtime·onM(&fn);
}

static void
badc(void)
{
	runtime·throw("attempt to execute C code on Go stack");
}
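The heart of the copystack machinery deleted above (and re-created in stack1.go below) is adjustpointer: after the stack contents are memmoved, every pointer that landed inside the old range [old.lo, old.hi) is shifted by the constant distance between the two stacks, while heap pointers pass through untouched. A minimal model of that test-and-shift on bare uintptr values; the real code finds the candidate slots by walking frames with the GC's stack maps, which this sketch does not attempt.

package main

import "fmt"

type stackRange struct{ lo, hi uintptr }

// adjust mirrors adjustpointer's core: slide p only if it points into
// the old stack; anything outside the range is left alone.
func adjust(p uintptr, old stackRange, delta uintptr) uintptr {
	if old.lo <= p && p < old.hi {
		return p + delta
	}
	return p
}

func main() {
	old := stackRange{lo: 0x1000, hi: 0x2000}
	nw := stackRange{lo: 0x8000, hi: 0x9000}
	delta := nw.hi - old.hi // AdjustInfo.delta: newbase - oldbase

	fmt.Printf("%#x\n", adjust(0x1ff0, old, delta)) // 0x8ff0: moved with the stack
	fmt.Printf("%#x\n", adjust(0x4000, old, delta)) // 0x4000: outside, unchanged
}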
src/runtime/stack.go (deleted)
@@ -1,13 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

const (
	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = ^uintptr(1313)
)
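The deleted constant and the C macro in stack.h below are the same value in two spellings: ^uintptr(1313) equals -1314 in two's complement (since ^x == -x-1), which truncates to the 0xfffffade the comments mention. A quick check:

package main

import "fmt"

func main() {
	p := ^uintptr(1313) // the deleted Go spelling

	var zero uintptr
	fmt.Println(p == zero-1314)    // true: the C spelling, (uint64)-1314
	fmt.Printf("%#x\n", uint32(p)) // 0xfffffade, as the comment says
}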
src/runtime/stack.h
@@ -2,117 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
// For the linkers. Must match Go definitions.
// TODO(rsc): Share Go definitions with linkers directly.

enum {
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows and on
	// Plan 9 because they do not use a separate stack.
#ifdef GOOS_windows
	StackSystem = 512 * sizeof(uintptr),
#else
#ifdef GOOS_plan9
	// The size of the note handler frame varies among architectures,
	// but 512 bytes should be enough for every implementation.
	StackSystem = 512,
#else
	StackSystem = 0,
#endif	// Plan 9
#endif	// Windows

	// The minimum size of stack used by Go code
	StackMin = 2048,

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	FixedStack0 = StackMin + StackSystem,
	FixedStack1 = FixedStack0 - 1,
	FixedStack2 = FixedStack1 | (FixedStack1 >> 1),
	FixedStack3 = FixedStack2 | (FixedStack2 >> 2),
	FixedStack4 = FixedStack3 | (FixedStack3 >> 4),
	FixedStack5 = FixedStack4 | (FixedStack4 >> 8),
	FixedStack6 = FixedStack5 | (FixedStack5 >> 16),
	FixedStack = FixedStack6 + 1,

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	StackBig = 4096,

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	StackGuard = 512 + StackSystem,

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	StackSmall = 128,

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	StackLimit = StackGuard - StackSystem - StackSmall,
};

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
#define StackPreempt ((uint64)-1314)
/*c2go
enum
{
	StackPreempt = -1314,
};
*/
#define StackFork ((uint64)-1234)
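The FixedStack0..FixedStack6 "hackery" above and runtime·round2 in the deleted stack.c are two ways to round up to a power of 2: smear the high bit rightward with shift/OR steps, or count shifts in a loop. A standalone Go sketch of both (the function names are mine, not the runtime's):

package main

import "fmt"

// roundPow2Bits mirrors the FixedStack enum trick: subtract 1, OR in
// right-shifted copies until all low bits are set, then add 1.
// Valid for 32-bit inputs >= 1.
func roundPow2Bits(x uint32) uint32 {
	x--
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	return x + 1
}

// roundPow2Loop mirrors runtime·round2: smallest 1<<s that is >= x.
func roundPow2Loop(x int32) int32 {
	s := uint(0)
	for (1 << s) < x {
		s++
	}
	return 1 << s
}

func main() {
	// With StackMin = 2048 and StackSystem = 512 (Windows), FixedStack0
	// is 2560, so FixedStack rounds up to 4096.
	fmt.Println(roundPow2Bits(2048 + 512)) // 4096
	fmt.Println(roundPow2Bits(2048))       // 2048: already a power of 2
	fmt.Println(roundPow2Loop(2560))       // 4096, same answer
}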
src/runtime/stack1.go (new file, 807 lines)
@ -0,0 +1,807 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package runtime
|
||||
|
||||
import "unsafe"
|
||||
|
||||
const (
|
||||
// StackDebug == 0: no logging
|
||||
// == 1: logging of per-stack operations
|
||||
// == 2: logging of per-frame operations
|
||||
// == 3: logging of per-word updates
|
||||
// == 4: logging of per-word reads
|
||||
stackDebug = 0
|
||||
stackFromSystem = 0 // allocate stacks from system memory instead of the heap
|
||||
stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
|
||||
stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
|
||||
|
||||
stackCache = 1
|
||||
)
|
||||
|
||||
const (
|
||||
uintptrMask = 1<<(8*ptrSize) - 1
|
||||
poisonGC = uintptrMask & 0xf969696969696969
|
||||
poisonStack = uintptrMask & 0x6868686868686868
|
||||
|
||||
// Goroutine preemption request.
|
||||
// Stored into g->stackguard0 to cause split stack check failure.
|
||||
// Must be greater than any real sp.
|
||||
// 0xfffffade in hex.
|
||||
stackPreempt = uintptrMask & -1314
|
||||
|
||||
// Thread is forking.
|
||||
// Stored into g->stackguard0 to cause split stack check failure.
|
||||
// Must be greater than any real sp.
|
||||
stackFork = uintptrMask & -1234
|
||||
)
|
||||
|
||||
// Global pool of spans that have free stacks.
|
||||
// Stacks are assigned an order according to size.
|
||||
// order = log_2(size/FixedStack)
|
||||
// There is a free list for each order.
|
||||
// TODO: one lock per order?
|
||||
var stackpool [_NumStackOrders]mspan
|
||||
var stackpoolmu mutex
|
||||
|
||||
var stackfreequeue stack
|
||||
|
||||
func stackinit() {
|
||||
if _StackCacheSize&_PageMask != 0 {
|
||||
gothrow("cache size must be a multiple of page size")
|
||||
}
|
||||
for i := range stackpool {
|
||||
mSpanList_Init(&stackpool[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Allocates a stack from the free pool. Must be called with
|
||||
// stackpoolmu held.
|
||||
func stackpoolalloc(order uint8) *mlink {
|
||||
list := &stackpool[order]
|
||||
s := list.next
|
||||
if s == list {
|
||||
// no free stacks. Allocate another span worth.
|
||||
s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
|
||||
if s == nil {
|
||||
gothrow("out of memory")
|
||||
}
|
||||
if s.ref != 0 {
|
||||
gothrow("bad ref")
|
||||
}
|
||||
if s.freelist != nil {
|
||||
gothrow("bad freelist")
|
||||
}
|
||||
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
|
||||
x := (*mlink)(unsafe.Pointer(uintptr(s.start)<<_PageShift + i))
|
||||
x.next = s.freelist
|
||||
s.freelist = x
|
||||
}
|
||||
mSpanList_Insert(list, s)
|
||||
}
|
||||
x := s.freelist
|
||||
if x == nil {
|
||||
gothrow("span has no free stacks")
|
||||
}
|
||||
s.freelist = x.next
|
||||
s.ref++
|
||||
if s.freelist == nil {
|
||||
// all stacks in s are allocated.
|
||||
mSpanList_Remove(s)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// Adds stack x to the free pool. Must be called with stackpoolmu held.
|
||||
func stackpoolfree(x *mlink, order uint8) {
|
||||
s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
|
||||
if s.state != _MSpanStack {
|
||||
gothrow("freeing stack not in a stack span")
|
||||
}
|
||||
if s.freelist == nil {
|
||||
// s will now have a free stack
|
||||
mSpanList_Insert(&stackpool[order], s)
|
||||
}
|
||||
x.next = s.freelist
|
||||
s.freelist = x
|
||||
s.ref--
|
||||
if s.ref == 0 {
|
||||
// span is completely free - return to heap
|
||||
mSpanList_Remove(s)
|
||||
s.freelist = nil
|
||||
mHeap_FreeStack(&mheap_, s)
|
||||
}
|
||||
}
|
||||
|
||||
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
|
||||
// The pool is required to prevent unlimited growth of per-thread caches.
|
||||
func stackcacherefill(c *mcache, order uint8) {
|
||||
if stackDebug >= 1 {
|
||||
print("stackcacherefill order=", order, "\n")
|
||||
}
|
||||
|
||||
// Grab some stacks from the global cache.
|
||||
// Grab half of the allowed capacity (to prevent thrashing).
|
||||
var list *mlink
|
||||
var size uintptr
|
||||
lock(&stackpoolmu)
|
||||
for size < _StackCacheSize/2 {
|
||||
x := stackpoolalloc(order)
|
||||
x.next = list
|
||||
list = x
|
||||
size += _FixedStack << order
|
||||
}
|
||||
unlock(&stackpoolmu)
|
||||
c.stackcache[order].list = list
|
||||
c.stackcache[order].size = size
|
||||
}
|
||||
|
||||
func stackcacherelease(c *mcache, order uint8) {
|
||||
if stackDebug >= 1 {
|
||||
print("stackcacherelease order=", order, "\n")
|
||||
}
|
||||
x := c.stackcache[order].list
|
||||
size := c.stackcache[order].size
|
||||
lock(&stackpoolmu)
|
||||
for size > _StackCacheSize/2 {
|
||||
y := x.next
|
||||
stackpoolfree(x, order)
|
||||
x = y
|
||||
size -= _FixedStack << order
|
||||
}
|
||||
unlock(&stackpoolmu)
|
||||
c.stackcache[order].list = x
|
||||
c.stackcache[order].size = size
|
||||
}
|
||||
|
||||
func stackcache_clear(c *mcache) {
|
||||
if stackDebug >= 1 {
|
||||
print("stackcache clear\n")
|
||||
}
|
||||
lock(&stackpoolmu)
|
||||
for order := uint8(0); order < _NumStackOrders; order++ {
|
||||
x := c.stackcache[order].list
|
||||
for x != nil {
|
||||
y := x.next
|
||||
stackpoolfree(x, order)
|
||||
x = y
|
||||
}
|
||||
c.stackcache[order].list = nil
|
||||
c.stackcache[order].size = 0
|
||||
}
|
||||
unlock(&stackpoolmu)
|
||||
}

func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		gothrow("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		gothrow("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			gothrow("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x *mlink
		c := thisg.m.mcache
		if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.next
			c.stackcache[order].size -= uintptr(n)
		}
		v = (unsafe.Pointer)(x)
	} else {
		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
		if s == nil {
			gothrow("out of memory")
		}
		v = (unsafe.Pointer)(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
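
// Usage sketch (hypothetical; real callers live in the proc and malloc
// code): a caller already running on the scheduler stack allocates a
// minimum-sized stack and later returns it.
//
//	stk := stackalloc(uint32(_FixedStack)) // size must be a power of 2
//	// ... use [stk.lo, stk.hi) as a goroutine stack ...
//	stackfree(stk)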

func stackfree(stk stack) {
	gp := getg()
	n := stk.hi - stk.lo
	v := (unsafe.Pointer)(stk.lo)
	if n&(n-1) != 0 {
		gothrow("stack not a power of 2")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := (*mlink)(v)
		c := gp.m.mcache
		if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mHeap_Lookup(&mheap_, v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			gothrow("bad span state")
		}
		mHeap_FreeStack(&mheap_, s)
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var mapnames = []string{
	_BitsDead:    "---",
	_BitsScalar:  "scalar",
	_BitsPointer: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |      locals      |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}
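
// A sketch of the effect, assuming word is a stack slot (hypothetical)
// holding a pointer into the old stack [old.lo, old.hi): the slot is
// slid by delta so it points at the same offset in the new stack.
//
//	adjinfo := adjustinfo{old: oldStack, delta: newStack.hi - oldStack.hi}
//	adjustpointer(&adjinfo, unsafe.Pointer(&word))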

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbits(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/4] >> ((i & 3) * 2)) & 3
}
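
// Each bitmap entry is two bits wide, four entries per byte. For
// example, if bv.bytedata[0] == 0x26 (binary 00 10 01 10), then
// ptrbits(&bv, 0) == 2 (_BitsPointer), ptrbits(&bv, 1) == 1 (_BitsScalar),
// ptrbits(&bv, 2) == 2 (_BitsPointer), and ptrbits(&bv, 3) == 0 (_BitsDead).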

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n / _BitsPerPointer)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*ptrSize), ":", mapnames[ptrbits(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/4], "\n")
		}
		switch ptrbits(&bv, i) {
		default:
			gothrow("unexpected pointer bits")
		case _BitsDead:
			if debug.gcdead != 0 {
				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
			}
		case _BitsScalar:
			// ok
		case _BitsPointer:
			p := *(*unsafe.Pointer)(add(scanp, i*ptrSize))
			up := uintptr(p)
			if f != nil && 0 < up && up < _PageSize && invalidptr != 0 || up == poisonGC || up == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", gofuncname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
				gothrow("invalid stack pointer")
			}
			if minp <= up && up < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", gofuncname(f), "\n")
				}
				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(up + delta)
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == switchtoMPC {
		// A special routine at the bottom of stack of a goroutine that does an onM call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	if thechar != '6' && thechar != '8' {
		minsize = ptrSize
	} else {
		minsize = 0
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			gothrow("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			gothrow("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				gothrow("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				gothrow("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.argp))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		gothrow("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		gothrow("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", old.hi-old.lo, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	oldstatus := readgstatus(gp)
	oldstatus &^= _Gscan
	if oldstatus == _Gwaiting || oldstatus == _Grunnable {
		casgstatus(gp, oldstatus, _Gcopystack) // oldstatus is Gwaiting or Grunnable
	} else {
		gothrow("copystack: bad status, not Gwaiting or Grunnable")
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used

	casgstatus(gp, _Gcopystack, oldstatus) // oldstatus is Gwaiting or Grunnable

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	if newsize > old.hi-old.lo {
		// growing, free stack immediately
		stackfree(old)
	} else {
		// shrinking, queue up free operation. We can't actually free the stack
		// just yet because we might run into the following situation:
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is shrunk
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
		//    the pointer looks like a pointer into a free span.
		// By not freeing, we prevent step #4 until GC is done.
		lock(&stackpoolmu)
		*(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
		stackfreequeue = old
		unlock(&stackpoolmu)
	}
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
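
// For example, round2(1) == 1, round2(3) == 4, and round2(2048) == 2048.
// The loop assumes x is positive: round2(0) or a negative x returns 1.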

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.stackguard0 == stackFork {
		gothrow("stack growth after fork")
	}
	if thisg.m.morebuf.g != thisg.m.curg {
		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g)
		gothrow("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
		gothrow("runtime: stack split at bad time")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning or Gscanrunning.

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = nil

	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	rewindmorestack(&gp.sched)

	if gp.stack.lo == 0 {
		gothrow("missing stack in newstack")
	}
	sp := gp.sched.sp
	if thechar == '6' || thechar == '8' {
		// The call to morestack cost a word.
		sp -= ptrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		gothrow("runtime: split stack overflow")
	}

	if gp.stackguard0 == stackPreempt {
		if gp == thisg.m.g0 {
			gothrow("runtime: preempt g0")
		}
		if thisg.m.p == nil && thisg.m.locks == 0 {
			gothrow("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			gcphasework(gp)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gp.preempt = false
			gp.preemptscan = false // Tells the GC preemption was successful.
			gogo(&gp.sched)        // never return
		}

		// Be conservative about where we preempt.
		// We are interested in preempting user Go code, not runtime code.
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			casgstatus(gp, _Gwaiting, _Grunning)
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gosched_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stack.hi - gp.stack.lo)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		gothrow("stack overflow")
	}

	// Note that the concurrent GC might be scanning the stack as we try to replace it.
	// copystack takes care of the appropriate coordination with the stack scanner.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gwaiting, _Grunning)
	gogo(&gp.sched)
}
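
// nilfunc below deliberately faults by writing through a nil pointer.
// gostartcallfn substitutes it for a nil function value, so that
// starting a goroutine with fn == nil crashes with an explicit nil
// dereference rather than jumping to an arbitrary address.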
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = (unsafe.Pointer)(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		gothrow("missing stack in shrinkstack")
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	if newsize < _FixedStack {
		return // don't shrink below the minimum-sized stack
	}
	used := gp.stack.hi - gp.sched.sp
	if used >= oldsize/4 {
		return // still using at least 1/4 of the segment.
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}

	/* TODO
	if _Windows && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}
	*/

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}
	copystack(gp, newsize)
}

// Do any delayed stack freeing that was queued up during GC.
func shrinkfinish() {
	lock(&stackpoolmu)
	s := stackfreequeue
	stackfreequeue = stack{}
	unlock(&stackpoolmu)
	for s.lo != 0 {
		t := *(*stack)(unsafe.Pointer(s.lo))
		stackfree(s)
		s = t
	}
}

//go:nosplit
func morestackc() {
	onM(func() {
		gothrow("attempt to execute C code on Go stack")
	})
}
src/runtime/stack2.go (new file, 106 lines)
@ -0,0 +1,106 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows and on
	// Plan 9 because they do not use a separate stack.
	_StackSystem = _Windows*512*ptrSize + _Plan9*512

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
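
	// A worked example of the rounding above: with _StackSystem == 0,
	// _FixedStack0 is 2048, already a power of two, so _FixedStack
	// stays 2048. On Windows/amd64 (_StackSystem == 512*8),
	// _FixedStack0 is 6144; smearing the low bits of 6143 gives 8191,
	// so _FixedStack rounds up to 8192.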

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 512 + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)
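
// Checking the arithmetic: -1314 as a 32-bit two's-complement value is
// 0x100000000 - 0x522 = 0xfffffade, matching the comment above; on
// 64-bit systems uintptrMask&-1314 is 0xfffffffffffffade. Either value
// is above any real stack pointer, so a split-stack check against
// stackguard0 always fails and control enters morestack.
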
@ -22,8 +22,7 @@ func (f *Func) raw() *_func {

// funcdata.h
const (
	_PCDATA_ArgSize             = 0
	_PCDATA_StackMapIndex       = 1
	_PCDATA_StackMapIndex       = 0
	_FUNCDATA_ArgsPointerMaps   = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_FUNCDATA_DeadValueMaps     = 2

@ -41,6 +41,7 @@ var (
	newprocPC   uintptr
	rt0_goPC    uintptr
	sigpanicPC  uintptr
	switchtoMPC uintptr

	externalthreadhandlerp uintptr // initialized elsewhere
)
@ -59,6 +60,7 @@ func tracebackinit() {
	newprocPC = funcPC(newproc)
	rt0_goPC = funcPC(rt0_go)
	sigpanicPC = funcPC(sigpanic)
	switchtoMPC = funcPC(switchtoM)
}

// Traceback over the deferred function calls.